diff --git a/CHANGELOG/CHANGELOG-8.4.md b/CHANGELOG/CHANGELOG-8.4.md
new file mode 100644
index 000000000..4bff1cb6c
--- /dev/null
+++ b/CHANGELOG/CHANGELOG-8.4.md
@@ -0,0 +1,113 @@
+# Release notes for v8.4.0
+
+[Documentation](https://kubernetes-csi.github.io)
+
+# Changelog since v8.3.0
+
+## Changes by Kind
+
+### API Change
+
+- Introduce the `v1beta2` VolumeGroupSnapshot API as described by [KEP 5013](https://github.com/kubernetes/enhancements/pull/5013) ([#1312](https://github.com/kubernetes-csi/external-snapshotter/pull/1312), [@leonardoce](https://github.com/leonardoce))
+
+### Feature
+
+- The number of worker threads in the snapshot-controller and csi-snapshotter is now configurable via the `worker-threads` flag. ([#282](https://github.com/kubernetes-csi/external-snapshotter/pull/282), [@huffmanca](https://github.com/huffmanca))
+
+### Other (Cleanup or Flake)
+
+- Several VolumeGroupSnapshot, VolumeGroupSnapshotClass and VolumeGroupSnapshotContent fields are now immutable. The `v1beta1` VolumeGroupSnapshot API is now marked as deprecated. ([#1337](https://github.com/kubernetes-csi/external-snapshotter/pull/1337), [@leonardoce](https://github.com/leonardoce))
+- Update kubernetes dependencies to v1.34.0 ([#1330](https://github.com/kubernetes-csi/external-snapshotter/pull/1330), [@dobsonj](https://github.com/dobsonj))
+
+### Uncategorized
+
+- Update CSI spec to v1.12. ([#1341](https://github.com/kubernetes-csi/external-snapshotter/pull/1341), [@xing-yang](https://github.com/xing-yang))
+
+## Dependencies
+
+### Added
+- github.com/antihax/optional: [v1.0.0](https://github.com/antihax/optional/tree/v1.0.0)
+- github.com/envoyproxy/go-control-plane/envoy: [v1.32.4](https://github.com/envoyproxy/go-control-plane/tree/envoy/v1.32.4)
+- github.com/envoyproxy/go-control-plane/ratelimit: [v0.1.0](https://github.com/envoyproxy/go-control-plane/tree/ratelimit/v0.1.0)
+- github.com/go-jose/go-jose/v4: [v4.0.4](https://github.com/go-jose/go-jose/tree/v4.0.4)
+- github.com/godbus/dbus/v5: [v5.0.4](https://github.com/godbus/dbus/tree/v5.0.4)
+- github.com/golang-jwt/jwt/v5: [v5.2.2](https://github.com/golang-jwt/jwt/tree/v5.2.2)
+- github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus: [v1.0.1](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/providers/prometheus/v1.0.1)
+- github.com/grpc-ecosystem/go-grpc-middleware/v2: [v2.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2.3.0)
+- github.com/matttproud/golang_protobuf_extensions: [v1.0.1](https://github.com/matttproud/golang_protobuf_extensions/tree/v1.0.1)
+- github.com/rogpeppe/fastuuid: [v1.2.0](https://github.com/rogpeppe/fastuuid/tree/v1.2.0)
+- github.com/spiffe/go-spiffe/v2: [v2.5.0](https://github.com/spiffe/go-spiffe/tree/v2.5.0)
+- github.com/zeebo/errs: [v1.4.0](https://github.com/zeebo/errs/tree/v1.4.0)
+- go.etcd.io/raft/v3: v3.6.0
+- go.yaml.in/yaml/v2: v2.4.2
+- go.yaml.in/yaml/v3: v3.0.4
+- k8s.io/apiextensions-apiserver: v0.34.0
+- sigs.k8s.io/structured-merge-diff/v6: v6.3.0
+
+### Changed
+- cel.dev/expr: v0.19.1 → v0.24.0
+- cloud.google.com/go/compute/metadata: v0.5.2 → v0.6.0
+- github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp: [v1.24.2 → v1.26.0](https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/compare/detectors/gcp/v1.24.2...detectors/gcp/v1.26.0)
+- github.com/cncf/xds/go: [b4127c9 → 2f00578](https://github.com/cncf/xds/compare/b4127c9...2f00578)
+- github.com/container-storage-interface/spec: [v1.11.0 → v1.12.0](https://github.com/container-storage-interface/spec/compare/v1.11.0...v1.12.0)
+- github.com/cpuguy83/go-md2man/v2: [v2.0.4 → v2.0.6](https://github.com/cpuguy83/go-md2man/compare/v2.0.4...v2.0.6)
+- github.com/emicklei/go-restful/v3: [v3.12.1 → v3.12.2](https://github.com/emicklei/go-restful/compare/v3.12.1...v3.12.2)
+- github.com/envoyproxy/go-control-plane: [v0.13.1 → v0.13.4](https://github.com/envoyproxy/go-control-plane/compare/v0.13.1...v0.13.4)
+- github.com/envoyproxy/protoc-gen-validate: [v1.1.0 → v1.2.1](https://github.com/envoyproxy/protoc-gen-validate/compare/v1.1.0...v1.2.1)
+- github.com/fsnotify/fsnotify: [v1.7.0 → v1.9.0](https://github.com/fsnotify/fsnotify/compare/v1.7.0...v1.9.0)
+- github.com/fxamacker/cbor/v2: [v2.7.0 → v2.9.0](https://github.com/fxamacker/cbor/compare/v2.7.0...v2.9.0)
+- github.com/golang/glog: [v1.2.2 → v1.2.4](https://github.com/golang/glog/compare/v1.2.2...v1.2.4)
+- github.com/google/cel-go: [v0.23.2 → v0.26.0](https://github.com/google/cel-go/compare/v0.23.2...v0.26.0)
+- github.com/google/gnostic-models: [v0.6.9 → v0.7.0](https://github.com/google/gnostic-models/compare/v0.6.9...v0.7.0)
+- github.com/google/pprof: [d1b30fe → 40e02aa](https://github.com/google/pprof/compare/d1b30fe...40e02aa)
+- github.com/grpc-ecosystem/grpc-gateway/v2: [v2.24.0 → v2.26.3](https://github.com/grpc-ecosystem/grpc-gateway/compare/v2.24.0...v2.26.3)
+- github.com/jonboulle/clockwork: [v0.4.0 → v0.5.0](https://github.com/jonboulle/clockwork/compare/v0.4.0...v0.5.0)
+- github.com/kubernetes-csi/csi-test/v5: [v5.3.1 → v5.4.0](https://github.com/kubernetes-csi/csi-test/compare/v5.3.1...v5.4.0)
+- github.com/modern-go/reflect2: [v1.0.2 → 35a7c28](https://github.com/modern-go/reflect2/compare/v1.0.2...35a7c28)
+- github.com/onsi/ginkgo/v2: [v2.21.0 → v2.22.0](https://github.com/onsi/ginkgo/compare/v2.21.0...v2.22.0)
+- github.com/onsi/gomega: [v1.35.1 → v1.36.1](https://github.com/onsi/gomega/compare/v1.35.1...v1.36.1)
+- github.com/spf13/cobra: [v1.8.1 → v1.9.1](https://github.com/spf13/cobra/compare/v1.8.1...v1.9.1)
+- github.com/spf13/pflag: [v1.0.5 → v1.0.6](https://github.com/spf13/pflag/compare/v1.0.5...v1.0.6)
+- go.etcd.io/bbolt: v1.3.11 → v1.4.2
+- go.etcd.io/etcd/api/v3: v3.5.21 → v3.6.4
+- go.etcd.io/etcd/client/pkg/v3: v3.5.21 → v3.6.4
+- go.etcd.io/etcd/client/v3: v3.5.21 → v3.6.4
+- go.etcd.io/etcd/pkg/v3: v3.5.21 → v3.6.4
+- go.etcd.io/etcd/server/v3: v3.5.21 → v3.6.4
+- go.opentelemetry.io/contrib/detectors/gcp: v1.31.0 → v1.34.0
+- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc: v0.58.0 → v0.60.0
+- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: v1.33.0 → v1.34.0
+- go.opentelemetry.io/otel/exporters/otlp/otlptrace: v1.33.0 → v1.34.0
+- go.opentelemetry.io/otel/metric: v1.33.0 → v1.35.0
+- go.opentelemetry.io/otel/sdk/metric: v1.31.0 → v1.34.0
+- go.opentelemetry.io/otel/sdk: v1.33.0 → v1.34.0
+- go.opentelemetry.io/otel/trace: v1.33.0 → v1.35.0
+- go.opentelemetry.io/otel: v1.33.0 → v1.35.0
+- go.opentelemetry.io/proto/otlp: v1.4.0 → v1.5.0
+- golang.org/x/mod: v0.20.0 → v0.21.0
+- golang.org/x/tools: v0.26.0 → v0.28.0
+- google.golang.org/genproto/googleapis/api: e6fa225 → a0af3ef
+- google.golang.org/genproto/googleapis/rpc: 9240e9c → a0af3ef
+- google.golang.org/grpc: v1.69.0 → v1.72.1
+- k8s.io/api: v0.33.0 → v0.34.0
+- k8s.io/apimachinery: v0.33.0 → v0.34.0
+- k8s.io/apiserver: v0.33.0 → v0.34.0
+- k8s.io/client-go: v0.33.0 → v0.34.0
+- k8s.io/code-generator: v0.33.0 → v0.34.0
+- k8s.io/component-base: v0.33.0 → v0.34.0
+- k8s.io/component-helpers: v0.33.0 → v0.34.0
+- k8s.io/gengo/v2: a7b603a → 85fd79d
+- k8s.io/kms: v0.33.0 → v0.34.0
+- k8s.io/kube-openapi: c8a335a → f3f2b99
+- k8s.io/utils: 24370be → 4c0f3b2
+- sigs.k8s.io/yaml: v1.4.0 → v1.6.0
+
+### Removed
+- github.com/census-instrumentation/opencensus-proto: [v0.4.1](https://github.com/census-instrumentation/opencensus-proto/tree/v0.4.1)
+- github.com/go-task/slim-sprig: [52ccab3](https://github.com/go-task/slim-sprig/tree/52ccab3)
+- github.com/golang-jwt/jwt/v4: [v4.5.2](https://github.com/golang-jwt/jwt/tree/v4.5.2)
+- github.com/grpc-ecosystem/go-grpc-middleware: [v1.3.0](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v1.3.0)
+- github.com/grpc-ecosystem/grpc-gateway: [v1.16.0](https://github.com/grpc-ecosystem/grpc-gateway/tree/v1.16.0)
+- go.etcd.io/etcd/client/v2: v2.305.21
+- go.etcd.io/etcd/raft/v3: v3.5.21
+- google.golang.org/genproto: ef43131
diff --git a/Makefile b/Makefile
index ec3520a51..2423e0038 100644
--- a/Makefile
+++ b/Makefile
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-.PHONY: all snapshot-controller csi-snapshotter clean test
+.PHONY: all snapshot-controller csi-snapshotter snapshot-conversion-webhook clean test
-CMDS=snapshot-controller csi-snapshotter
+CMDS=snapshot-controller csi-snapshotter snapshot-conversion-webhook
all: build
include release-tools/build.make
diff --git a/README.md b/README.md
index bb8f53ac3..12c842cea 100644
--- a/README.md
+++ b/README.md
@@ -6,8 +6,6 @@ The volume snapshot feature supports CSI v1.0 and higher. It was introduced as a
The volume group snapshot feature supports CSI v1.10.0 and higher, and has been introduced in [Kubernetes 1.27 as an alpha feature](https://kubernetes.io/blog/2023/05/08/kubernetes-1-27-volume-group-snapshot-alpha/).
-> :warning: **WARNING**: The validation webhook was deprecated in v8.0.0 and it is now removed. The validation webhook would prevent creating multiple default volume snapshot classes and multiple default volume group snapshot classes for the same CSI driver. With the removal of the validation webhook, an error will still be raised when dynamically provisioning a VolumeSnapshot or VolumeGroupSnapshot when multiple default volume snapshot classes or multiple default volume group snapshot classes for the same CSI driver exist.
-
## Overview
With the promotion of Volume Snapshot to GA, the feature is enabled by default on standard Kubernetes deployments and cannot be turned off.
@@ -22,15 +20,15 @@ Blog post for the Volume Group Snapshot Alpha feature can be found [here](https:
This information reflects the head of this branch.
-| Minimum CSI Version | Recommended CSI Version | Container Image | [Min K8s Version](https://kubernetes-csi.github.io/docs/kubernetes-compatibility.html#minimum-version) | [Recommended K8s Version](https://kubernetes-csi.github.io/docs/project-policies.html#recommended-version) |
-| ------------------------------------------------------------------------------------------ | ----------------------------| --------------- | --------------- | --------------- |
-| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/csi-snapshotter | 1.20 | 1.20 |
-| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/snapshot-controller | 1.20 | 1.20 |
-| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/snapshot-validation-webhook | 1.20 | 1.20 |
+| Minimum CSI Version | Recommended CSI Version | Container Image | [Min K8s Version](https://kubernetes-csi.github.io/docs/kubernetes-compatibility.html#minimum-version) | [Recommended K8s Version](https://kubernetes-csi.github.io/docs/project-policies.html#recommended-version) |
+| ------------------------------------------------------------------------------------------ | ----------------------------|----------------------------------------------------| --------------- | --------------- |
+| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/csi-snapshotter | 1.20 | 1.20 |
+| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/snapshot-controller | 1.20 | 1.20 |
+| [CSI Spec v1.0.0](https://github.com/container-storage-interface/spec/releases/tag/v1.0.0) | [CSI Spec v1.5.0](https://github.com/container-storage-interface/spec/releases/tag/v1.5.0) | k8s.gcr.io/sig-storage/snapshot-conversion-webhook | 1.20 | 1.20 |
-Note: snapshot-controller, csi-snapshotter v4.1 requires v1 snapshot CRDs to be installed, but it serves both v1 and v1beta1 snapshot objects. Storage version is changed from v1beta1 to v1 in 4.1.0 so v1beta1 is deprecated and will be removed in a future release.
+Note: snapshot-controller, snapshot-conversion-webhook, and csi-snapshotter v4.1 require the v1 snapshot CRDs to be installed, but they serve both v1 and v1beta1 snapshot objects. The storage version changed from v1beta1 to v1 in 4.1.0, so v1beta1 is deprecated and will be removed in a future release.
-Note: when the volume group snapshot feature is enabled, snapshot-controller, csi-snapshotter require the v1alpha1 volumegroupsnapshot CRDs to be installed.
+Note: when the volume group snapshot feature is enabled, snapshot-controller, snapshot-conversion-webhook, and csi-snapshotter require the v1beta2 volumegroupsnapshot CRDs to be installed.
## Feature Status
@@ -48,9 +46,9 @@ The CSI external-snapshotter sidecar talks to CSI over socket (/run/csi/socket b
In the current release, both v1 and v1beta1 APIs are served while the stored API version is changed from v1beta1 to v1. v1beta1 APIs are deprecated and will be removed in a future release. It is recommended for users to switch to v1 APIs as soon as possible. Any previously created invalid v1beta1 objects have to be deleted before upgrading to version 4.1.
-### Volume Group Snapshot v1alpha1 APIs
+### Volume Group Snapshot v1beta2 APIs
-When enabled, the VolumeGroupSnapshot v1alpha1 APIs are being served.
+When enabled, the VolumeGroupSnapshot v1beta2 APIs are served.
## Usage
@@ -58,6 +56,7 @@ Volume Snapshot feature contains the following components:
* [Kubernetes Volume Snapshot and Volume Group Snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd)
* [Volume snapshot and volume group snapshot controller](https://github.com/kubernetes-csi/external-snapshotter/tree/master/pkg/common-controller)
+* [Volume group snapshot conversion webhook](https://github.com/kubernetes-csi/external-snapshotter/tree/master/pkg/webhook)
* CSI Driver along with [CSI Snapshotter sidecar](https://github.com/kubernetes-csi/external-snapshotter/tree/master/pkg/sidecar-controller)
The Volume Snapshot feature depends on a volume snapshot controller and the volume snapshot CRDs. Both the controller and the CRDs are independent of any CSI driver. The CSI Snapshotter sidecar must run once per CSI driver. The single snapshot controller deployment works for all CSI drivers in a cluster. With leader election configured, the CSI sidecars and snapshot controller elect one leader per deployment. If deployed with two or more pods and leader election is enabled, the non-leader containers will attempt to get the lease. If the leader container dies, a non-leader will take over.
@@ -66,6 +65,8 @@ Therefore, it is strongly recommended that Kubernetes distributors bundle and de
If your Kubernetes distribution does not bundle the snapshot controller, you may manually install these components by executing the following steps. Note that the snapshot controller YAML files in the git repository deploy into the default namespace for system testing purposes. For general use, update the snapshot controller YAMLs with an appropriate namespace prior to installing. For example, on a Vanilla Kubernetes cluster update the namespace from 'default' to 'kube-system' prior to issuing the kubectl create command.
+There is a new conversion webhook server that converts group snapshot objects between the v1beta1 and v1beta2 APIs. The cluster admin or Kubernetes distribution admin should install the webhook alongside the snapshot controller and CRDs if they want to keep serving the group snapshot v1beta1 API. More details [below](#conversion-webhook).
+
Install Snapshot and Volume Group Snapshot CRDs:
* With the repo cloned locally: `kubectl kustomize client/config/crd | kubectl create -f -`
* From the repo remotely: `kubectl kustomize https://github.com/kubernetes-csi/external-snapshotter/client/config/crd | kubectl create -f -`
@@ -82,23 +83,22 @@ Install CSI Driver:
* With the repo cloned locally: `kubectl kustomize deploy/kubernetes/csi-snapshotter | kubectl create -f -`
* From the repo remotely: `kubectl kustomize https://github.com/kubernetes-csi/external-snapshotter/deploy/kubernetes/csi-snapshotter | kubectl create -f -`
-##### Volume Snapshot
+### Conversion Webhook
+
-* Spec.VolumeSnapshotClassName must not be an empty string or nil on creation
-* Spec.Source.PersistentVolumeClaimName must not be changed on update requests
-* Spec.Source.VolumeSnapshotContentName must not be changed on update requests
+The snapshot conversion webhook is an HTTP callback which responds to
+[conversion requests](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion),
+allowing the API server to convert VolumeGroupSnapshotContent objects between the v1beta1 and v1beta2 APIs.
-##### Volume Snapshot Content
+Read more about how to install the example webhook [here](deploy/kubernetes/webhook-example/README.md).
-* Spec.VolumeSnapshotRef.Name must not be an empty string on creation
-* Spec.VolumeSnapshotRef.Namespace must not be an empty string on creation
-* Spec.Source.VolumeHandle must not be changed on update requests
-* Spec.Source.SnapshotHandle must not be changed on update requests
-* Spec.SourceVolumeMode must not be changes on update requests
+#### Conversion Webhook Command Line Options
-##### Volume Snapshot Classes
+* `--tls-cert-file`: File containing the x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). Required.
-* There can only be a single default volume snapshot class for a particular driver.
+* `--tls-private-key-file`: File containing the x509 private key matching --tls-cert-file. Required.
+
+* `--port`: Secure port that the webhook listens on (default 443).
### Distributed Snapshotting
@@ -242,6 +242,8 @@ If you have already deployed v1alpha1 snapshot APIs and external-snapshotter sid
### Upgrade from v1beta1 to v1
+The validation webhook should be installed before upgrading to v1. Potential impacts of not installing it before upgrading to v1 include being unable to delete invalid snapshot objects. See the section on the validation webhook for details.
+
* When upgrading to 4.0, change from v1beta1 to v1 is backward compatible because both v1 and v1beta1 are served while the stored API version is still v1beta1. Future releases will switch the stored version to v1 and gradually remove v1beta1 support.
* When upgrading from 3.x to 4.1, change from v1beta1 to v1 is no longer backward compatible because stored API version is changed to v1 although both v1 and v1beta1 are still served. v1beta1 is deprecated in 4.1.
* v1beta1 support will be removed in a future release. It is recommended for users to switch to v1 as soon as possible. Any previously created invalid v1beta1 objects have to be deleted before upgrading to version 4.1.
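
As a usage sketch for the new API surface introduced later in this diff: the Go snippet below constructs a v1beta2 VolumeGroupSnapshot that dynamically snapshots all PVCs carrying a given label. It is illustrative only; the object, namespace, and class names are made up, and per the v1beta2 CEL rules exactly one of `selector` and `volumeGroupSnapshotContentName` may be set.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
)

func main() {
	// Hypothetical class name; any installed VolumeGroupSnapshotClass works.
	className := "example-groupsnapclass"
	vgs := groupsnapshotv1beta2.VolumeGroupSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "db-group-snapshot", Namespace: "default"},
		Spec: groupsnapshotv1beta2.VolumeGroupSnapshotSpec{
			// Selector picks the PVCs to snapshot together; it is
			// immutable once set, per the v1beta2 validation rules.
			Source: groupsnapshotv1beta2.VolumeGroupSnapshotSource{
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"group": "db"},
				},
			},
			VolumeGroupSnapshotClassName: &className,
		},
	}
	fmt.Printf("%s/%s selects PVCs with labels %v\n",
		vgs.Namespace, vgs.Name, vgs.Spec.Source.Selector.MatchLabels)
}
```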
diff --git a/client/apis/volumegroupsnapshot/v1beta1/types.go b/client/apis/volumegroupsnapshot/v1beta1/types.go
index da0540bcf..187c23f78 100644
--- a/client/apis/volumegroupsnapshot/v1beta1/types.go
+++ b/client/apis/volumegroupsnapshot/v1beta1/types.go
@@ -119,6 +119,7 @@ type VolumeGroupSnapshotStatus struct {
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Namespaced,shortName=vgs
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion
// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
// +kubebuilder:printcolumn:name="VolumeGroupSnapshotClass",type=string,JSONPath=`.spec.volumeGroupSnapshotClassName`,description="The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot."
// +kubebuilder:printcolumn:name="VolumeGroupSnapshotContent",type=string,JSONPath=`.status.boundVolumeGroupSnapshotContentName`,description="Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object."
@@ -162,6 +163,7 @@ type VolumeGroupSnapshotList struct {
// is used by specifying its name in a VolumeGroupSnapshot object.
// VolumeGroupSnapshotClasses are non-namespaced.
// +kubebuilder:object:root=true
+// +kubebuilder:deprecatedversion
// +kubebuilder:resource:scope=Cluster,shortName=vgsclass;vgsclasses
// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver`
// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeGroupSnapshotContent created through the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot is deleted."
@@ -218,6 +220,7 @@ type VolumeGroupSnapshotClassList struct {
// in the underlying storage system
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster,shortName=vgsc;vgscs
+// +kubebuilder:deprecatedversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.spec.deletionPolicy`,description="Determines whether this VolumeGroupSnapshotContent and its physical group snapshot on the underlying storage system should be deleted when its bound VolumeGroupSnapshot is deleted."
diff --git a/client/apis/volumegroupsnapshot/v1beta2/doc.go b/client/apis/volumegroupsnapshot/v1beta2/doc.go
new file mode 100644
index 000000000..7e20f8d77
--- /dev/null
+++ b/client/apis/volumegroupsnapshot/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=groupsnapshot.storage.k8s.io
+
+package v1beta2
diff --git a/client/apis/volumegroupsnapshot/v1beta2/register.go b/client/apis/volumegroupsnapshot/v1beta2/register.go
new file mode 100644
index 000000000..d982a1f61
--- /dev/null
+++ b/client/apis/volumegroupsnapshot/v1beta2/register.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package.
+const GroupName = "groupsnapshot.storage.k8s.io"
+
+var (
+ // SchemeBuilder is the new scheme builder
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme adds to scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+ // SchemeGroupVersion is the group version used to register these objects.
+ SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
+)
+
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ SchemeBuilder.Register(addKnownTypes)
+}
+
+// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &VolumeGroupSnapshotClass{},
+ &VolumeGroupSnapshotClassList{},
+ &VolumeGroupSnapshot{},
+ &VolumeGroupSnapshotList{},
+ &VolumeGroupSnapshotContent{},
+ &VolumeGroupSnapshotContentList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
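
For context on how these registration helpers are typically consumed, here is a minimal, illustrative sketch (not part of this PR) that adds the v1beta2 group to a `runtime.Scheme`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes, registering all six v1beta2 kinds.
	if err := groupsnapshotv1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := groupsnapshotv1beta2.SchemeGroupVersion.WithKind("VolumeGroupSnapshot")
	fmt.Println(scheme.Recognizes(gvk)) // true
}
```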
diff --git a/client/apis/volumegroupsnapshot/v1beta2/types.go b/client/apis/volumegroupsnapshot/v1beta2/types.go
new file mode 100644
index 000000000..5adb5cf29
--- /dev/null
+++ b/client/apis/volumegroupsnapshot/v1beta2/types.go
@@ -0,0 +1,444 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// +kubebuilder:object:generate=true
+package v1beta2
+
+import (
+ core_v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+)
+
+// VolumeGroupSnapshotSpec defines the desired state of a volume group snapshot.
+type VolumeGroupSnapshotSpec struct {
+ // Source specifies where a group snapshot will be created from.
+ // This field is immutable after creation.
+ // Required.
+ Source VolumeGroupSnapshotSource `json:"source" protobuf:"bytes,1,opt,name=source"`
+
+ // VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass
+ // requested by the VolumeGroupSnapshot.
+ // VolumeGroupSnapshotClassName may be left nil to indicate that the default
+ // class will be used.
+ // Empty string is not allowed for this field.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="size(self) > 0",message="volumeGroupSnapshotClassName must not be the empty string when set"
+ VolumeGroupSnapshotClassName *string `json:"volumeGroupSnapshotClassName,omitempty" protobuf:"bytes,2,opt,name=volumeGroupSnapshotClassName"`
+}
+
+// VolumeGroupSnapshotSource specifies whether the underlying group snapshot should be
+// dynamically taken upon creation or if a pre-existing VolumeGroupSnapshotContent
+// object should be used.
+// Exactly one of its members must be set.
+// Members in VolumeGroupSnapshotSource are immutable.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.selector) || has(self.selector)", message="selector is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)", message="volumeGroupSnapshotContentName is required once set"
+// +kubebuilder:validation:XValidation:rule="(has(self.selector) && !has(self.volumeGroupSnapshotContentName)) || (!has(self.selector) && has(self.volumeGroupSnapshotContentName))", message="exactly one of selector and volumeGroupSnapshotContentName must be set"
+type VolumeGroupSnapshotSource struct {
+ // Selector is a label query over persistent volume claims that are to be
+ // grouped together for snapshotting.
+ // This labelSelector will be used to match the label added to a PVC.
+ // If the label is added to or removed from a volume after a group snapshot
+ // is created, the existing group snapshots won't be modified.
+ // Once a VolumeGroupSnapshotContent is created and the sidecar starts to process
+ // it, the volume list will not change with retries.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable"
+ Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
+
+ // VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent
+ // object representing an existing volume group snapshot.
+ // This field should be set if the volume group snapshot already exists and
+ // only needs a representation in Kubernetes.
+ // This field is immutable.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupSnapshotContentName is immutable"
+ VolumeGroupSnapshotContentName *string `json:"volumeGroupSnapshotContentName,omitempty" protobuf:"bytes,2,opt,name=volumeGroupSnapshotContentName"`
+}
+
+// VolumeGroupSnapshotStatus defines the observed state of volume group snapshot.
+type VolumeGroupSnapshotStatus struct {
+ // BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
+ // object to which this VolumeGroupSnapshot object intends to bind to.
+ // If not specified, it indicates that the VolumeGroupSnapshot object has not
+ // been successfully bound to a VolumeGroupSnapshotContent object yet.
+ // NOTE: To avoid possible security issues, consumers must verify binding between
+ // VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
+ // (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
+ // point at each other) before using this object.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="boundVolumeGroupSnapshotContentName is immutable once set"
+ // +optional
+ BoundVolumeGroupSnapshotContentName *string `json:"boundVolumeGroupSnapshotContentName,omitempty" protobuf:"bytes,1,opt,name=boundVolumeGroupSnapshotContentName"`
+
+ // CreationTime is the timestamp when the point-in-time group snapshot is taken
+ // by the underlying storage system.
+ // If not specified, it may indicate that the creation time of the group snapshot
+ // is unknown.
+ // This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus
+ // +optional
+ CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"`
+
+ // ReadyToUse indicates if all the individual snapshots in the group are ready
+ // to be used to restore a group of volumes.
+ // ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
+ // If not specified, it means the readiness of a group snapshot is unknown.
+ // +optional
+ ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"`
+
+ // Error is the last observed error during group snapshot creation, if any.
+ // This field could be helpful to upper level controllers (i.e., application
+ // controller) to decide whether they should continue on waiting for the group
+ // snapshot to be created based on the type of error reported.
+ // The snapshot controller will keep retrying when an error occurs during the
+ // group snapshot creation. Upon success, this error field will be cleared.
+ // +optional
+ Error *snapshotv1.VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,4,opt,name=error,casttype=VolumeSnapshotError"`
+}
+
+//+genclient
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshot is a user's request for creating either a point-in-time
+// group snapshot or binding to a pre-existing group snapshot.
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:scope=Namespaced,shortName=vgs
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotClass",type=string,JSONPath=`.spec.volumeGroupSnapshotClassName`,description="The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotContent",type=string,JSONPath=`.status.boundVolumeGroupSnapshotContentName`,description="Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object."
+// +kubebuilder:printcolumn:name="CreationTime",type=date,JSONPath=`.status.creationTime`,description="Timestamp when the point-in-time group snapshot was taken by the underlying storage system."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshot struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired characteristics of a group snapshot requested by a user.
+ // Required.
+ Spec VolumeGroupSnapshotSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Status represents the current information of a group snapshot.
+ // Consumers must verify binding between VolumeGroupSnapshot and
+ // VolumeGroupSnapshotContent objects is successful (by validating that both
+ // VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
+ // using this object.
+ // +optional
+ Status *VolumeGroupSnapshotStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// VolumeGroupSnapshotList contains a list of VolumeGroupSnapshot objects.
+type VolumeGroupSnapshotList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Items is the list of VolumeGroupSnapshots.
+ Items []VolumeGroupSnapshot `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+//+genclient
+//+genclient:nonNamespaced
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotClass specifies parameters that an underlying storage system
+// uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
+// is used by specifying its name in a VolumeGroupSnapshot object.
+// VolumeGroupSnapshotClasses are non-namespaced.
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:scope=Cluster,shortName=vgsclass;vgsclasses
+// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.driver`
+// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.deletionPolicy`,description="Determines whether a VolumeGroupSnapshotContent created through the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot is deleted."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshotClass struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
+ // Required.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="driver is immutable once set"
+ Driver string `json:"driver" protobuf:"bytes,2,opt,name=driver"`
+
+ // Parameters is a key-value map with storage driver specific parameters for
+ // creating group snapshots.
+ // These values are opaque to Kubernetes and are passed directly to the driver.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="parameters are immutable once set"
+ // +optional
+ Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+
+ // DeletionPolicy determines whether a VolumeGroupSnapshotContent created
+ // through the VolumeGroupSnapshotClass should be deleted when its bound
+ // VolumeGroupSnapshot is deleted.
+ // Supported values are "Retain" and "Delete".
+ // "Retain" means that the VolumeGroupSnapshotContent and its physical group
+ // snapshot on underlying storage system are kept.
+ // "Delete" means that the VolumeGroupSnapshotContent and its physical group
+ // snapshot on underlying storage system are deleted.
+ // Required.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="deletionPolicy is immutable once set"
+ DeletionPolicy snapshotv1.DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,4,opt,name=deletionPolicy"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotClassList is a collection of VolumeGroupSnapshotClasses.
+// +kubebuilder:object:root=true
+type VolumeGroupSnapshotClassList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of VolumeGroupSnapshotClasses.
+ Items []VolumeGroupSnapshotClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object
+// in the underlying storage system
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:scope=Cluster,shortName=vgsc;vgscs
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="ReadyToUse",type=boolean,JSONPath=`.status.readyToUse`,description="Indicates if all the individual snapshots in the group are ready to be used to restore a group of volumes."
+// +kubebuilder:printcolumn:name="DeletionPolicy",type=string,JSONPath=`.spec.deletionPolicy`,description="Determines whether this VolumeGroupSnapshotContent and its physical group snapshot on the underlying storage system should be deleted when its bound VolumeGroupSnapshot is deleted."
+// +kubebuilder:printcolumn:name="Driver",type=string,JSONPath=`.spec.driver`,description="Name of the CSI driver used to create the physical group snapshot on the underlying storage system."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotClass",type=string,JSONPath=`.spec.volumeGroupSnapshotClassName`,description="Name of the VolumeGroupSnapshotClass from which this group snapshot was (or will be) created."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshotNamespace",type=string,JSONPath=`.spec.volumeGroupSnapshotRef.namespace`,description="Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent object is bound."
+// +kubebuilder:printcolumn:name="VolumeGroupSnapshot",type=string,JSONPath=`.spec.volumeGroupSnapshotRef.name`,description="Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent object is bound."
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
+type VolumeGroupSnapshotContent struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system.
+ // Required.
+ Spec VolumeGroupSnapshotContentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+ // Status represents the current information of a group snapshot.
+ // +optional
+ Status *VolumeGroupSnapshotContentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VolumeGroupSnapshotContentList is a list of VolumeGroupSnapshotContent objects
+// +kubebuilder:object:root=true
+type VolumeGroupSnapshotContentList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of VolumeGroupSnapshotContents.
+ Items []VolumeGroupSnapshotContent `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// VolumeGroupSnapshotContentSpec describes the common attributes of a group snapshot content
+type VolumeGroupSnapshotContentSpec struct {
+ // VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this
+ // VolumeGroupSnapshotContent object is bound.
+ // VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference
+ // this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid.
+ // For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the
+ // VolumeGroupSnapshot object MUST be provided for binding to happen.
+ // This field is immutable after creation.
+ // Required.
+ // +kubebuilder:validation:XValidation:rule="has(self.name) && has(self.__namespace__)",message="both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace must be set"
+ // +kubebuilder:validation:XValidation:rule="self.name == oldSelf.name && self.__namespace__ == oldSelf.__namespace__",message="volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace are immutable"
+ // +kubebuilder:validation:XValidation:rule="!has(oldSelf.uid) || (has(self.uid) && self.uid == oldSelf.uid)",message="volumeGroupSnapshotRef.uid is immutable once set"
+ VolumeGroupSnapshotRef core_v1.ObjectReference `json:"volumeGroupSnapshotRef" protobuf:"bytes,1,opt,name=volumeGroupSnapshotRef"`
+
+ // DeletionPolicy determines whether this VolumeGroupSnapshotContent and the
+ // physical group snapshot on the underlying storage system should be deleted
+ // when the bound VolumeGroupSnapshot is deleted.
+ // Supported values are "Retain" and "Delete".
+ // "Retain" means that the VolumeGroupSnapshotContent and its physical group
+ // snapshot on underlying storage system are kept.
+ // "Delete" means that the VolumeGroupSnapshotContent and its physical group
+ // snapshot on underlying storage system are deleted.
+ // For dynamically provisioned group snapshots, this field will automatically
+ // be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field
+ // defined in the corresponding VolumeGroupSnapshotClass.
+ // For pre-existing snapshots, users MUST specify this field when creating the
+ // VolumeGroupSnapshotContent object.
+ // Required.
+ DeletionPolicy snapshotv1.DeletionPolicy `json:"deletionPolicy" protobuf:"bytes,2,opt,name=deletionPolicy"`
+
+ // Driver is the name of the CSI driver used to create the physical group snapshot on
+ // the underlying storage system.
+ // This MUST be the same as the name returned by the CSI GetPluginName() call for
+ // that driver.
+ // Required.
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="driver is immutable once set"
+ Driver string `json:"driver" protobuf:"bytes,3,opt,name=driver"`
+
+ // VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from
+ // which this group snapshot was (or will be) created.
+ // Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or
+ // recreated with a different set of values, and as such, should not be referenced
+ // post-snapshot creation.
+ // For dynamic provisioning, this field must be set.
+ // This field may be unset for pre-provisioned snapshots.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupSnapshotClassName is immutable once set"
+ VolumeGroupSnapshotClassName *string `json:"volumeGroupSnapshotClassName,omitempty" protobuf:"bytes,4,opt,name=volumeGroupSnapshotClassName"`
+
+ // Source specifies whether the snapshot is (or should be) dynamically provisioned
+ // or already exists, and just requires a Kubernetes object representation.
+ // This field is immutable after creation.
+ // Required.
+ Source VolumeGroupSnapshotContentSource `json:"source" protobuf:"bytes,5,opt,name=source"`
+}
+
+// The VolumeSnapshotInfo struct is added in v1beta2.
+// VolumeSnapshotInfo contains information about a snapshot.
+type VolumeSnapshotInfo struct {
+ // VolumeHandle specifies the CSI "volume_id" of the volume from which this snapshot
+ // was taken.
+ VolumeHandle string `json:"volumeHandle,omitempty" protobuf:"bytes,1,opt,name=volumeHandle"`
+
+ // SnapshotHandle is the CSI "snapshot_id" of this snapshot on the underlying storage system.
+ SnapshotHandle string `json:"snapshotHandle,omitempty" protobuf:"bytes,2,opt,name=snapshotHandle"`
+
+ // CreationTime is the timestamp when the point-in-time snapshot is taken
+ // by the underlying storage system.
+ // +optional
+ CreationTime *int64 `json:"creationTime,omitempty" protobuf:"varint,3,opt,name=creationTime"`
+
+ // ReadyToUse indicates if the snapshot is ready to be used to restore a volume.
+ // +optional
+ ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,4,opt,name=readyToUse"`
+
+ // RestoreSize represents the minimum size of volume required to create a volume
+ // from this snapshot.
+ // +optional
+ RestoreSize *int64 `json:"restoreSize,omitempty" protobuf:"bytes,5,opt,name=restoreSize"`
+}
+
+// VolumeGroupSnapshotContentStatus defines the observed state of VolumeGroupSnapshotContent.
+type VolumeGroupSnapshotContentStatus struct {
+ // VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
+ // to identify the VolumeGroupSnapshot on the storage system.
+ // If a storage system does not provide such an id, the
+ // CSI driver can choose to return the VolumeGroupSnapshot name.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeGroupSnapshotHandle is immutable once set"
+ VolumeGroupSnapshotHandle *string `json:"volumeGroupSnapshotHandle,omitempty" protobuf:"bytes,1,opt,name=volumeGroupSnapshotHandle"`
+
+ // CreationTime is the timestamp when the point-in-time group snapshot is taken
+ // by the underlying storage system.
+ // If not specified, it indicates the creation time is unknown.
+ // This field is the source for the CreationTime field in VolumeGroupSnapshotStatus
+ // +optional
+ CreationTime *metav1.Time `json:"creationTime,omitempty" protobuf:"bytes,2,opt,name=creationTime"`
+
+ // ReadyToUse indicates if all the individual snapshots in the group are ready to be
+ // used to restore a group of volumes.
+ // ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
+ // +optional
+ ReadyToUse *bool `json:"readyToUse,omitempty" protobuf:"varint,3,opt,name=readyToUse"`
+
+ // Error is the last observed error during group snapshot creation, if any.
+ // Upon success after retry, this error field will be cleared.
+ // +optional
+ Error *snapshotv1.VolumeSnapshotError `json:"error,omitempty" protobuf:"bytes,4,opt,name=error,casttype=VolumeSnapshotError"`
+
+ // This field is introduced in v1beta1 but removed in v1beta2
+ // It is replaced by VolumeSnapshotInfoList
+ // Information in this field from an existing v1beta1 API object
+ // will be copied to VolumeSnapshotInfoList by the conversion logic
+ //
+ // VolumeSnapshotHandlePairList is a list of CSI "volume_id" and "snapshot_id"
+ // pair returned by the CSI driver to identify snapshots and their source volumes
+ // on the storage system.
+ // +optional
+ // VolumeSnapshotHandlePairList []VolumeSnapshotHandlePair `json:"volumeSnapshotHandlePairList,omitempty" protobuf:"bytes,6,opt,name=volumeSnapshotHandlePairList"`
+
+ // This field is introduced in v1beta2
+ // It replaces VolumeSnapshotHandlePairList.
+ // VolumeSnapshotInfoList is a list of snapshot information returned
+ // by the CSI driver to identify snapshots on the storage system.
+ // +optional
+ VolumeSnapshotInfoList []VolumeSnapshotInfo `json:"volumeSnapshotInfoList,omitempty" protobuf:"bytes,5,opt,name=volumeSnapshotInfo"`
+}
+
+// VolumeGroupSnapshotContentSource represents the CSI source of a group snapshot.
+// Exactly one of its members must be set.
+// Members in VolumeGroupSnapshotContentSource are immutable.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.volumeHandles) || has(self.volumeHandles)", message="volumeHandles is required once set"
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)", message="groupSnapshotHandles is required once set"
+// +kubebuilder:validation:XValidation:rule="(has(self.volumeHandles) && !has(self.groupSnapshotHandles)) || (!has(self.volumeHandles) && has(self.groupSnapshotHandles))", message="exactly one of volumeHandles and groupSnapshotHandles must be set"
+type VolumeGroupSnapshotContentSource struct {
+ // VolumeHandles is a list of volume handles on the backend to be snapshotted
+ // together. It is specified for dynamic provisioning of the VolumeGroupSnapshot.
+ // This field is immutable.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="volumeHandles is immutable"
+ VolumeHandles []string `json:"volumeHandles,omitempty" protobuf:"bytes,1,opt,name=volumeHandles"`
+
+ // GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing
+ // group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots
+ // on the underlying storage system for which a Kubernetes object
+ // representation was (or should be) created.
+ // This field is immutable.
+ // +optional
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="groupSnapshotHandles is immutable"
+ GroupSnapshotHandles *GroupSnapshotHandles `json:"groupSnapshotHandles,omitempty" protobuf:"bytes,2,opt,name=groupSnapshotHandles"`
+}
+
+type GroupSnapshotHandles struct {
+ // VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing
+ // group snapshot on the underlying storage system for which a Kubernetes object
+ // representation was (or should be) created.
+ // This field is immutable.
+ // Required.
+ VolumeGroupSnapshotHandle string `json:"volumeGroupSnapshotHandle" protobuf:"bytes,1,opt,name=volumeGroupSnapshotHandle"`
+
+ // VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing
+ // snapshots on the underlying storage system for which Kubernetes object
+ // representations were (or should be) created.
+ // This field is immutable.
+ // Required.
+ VolumeSnapshotHandles []string `json:"volumeSnapshotHandles" protobuf:"bytes,2,opt,name=volumeSnapshotHandles"`
+}
+
+// VolumeSnapshotHandlePair defines a pair of a source volume handle and a snapshot handle
+type VolumeSnapshotHandlePair struct {
+ // VolumeHandle is a unique id returned by the CSI driver to identify a volume
+ // on the storage system
+ // Required.
+ VolumeHandle string `json:"volumeHandle" protobuf:"bytes,1,opt,name=volumeHandle"`
+
+ // SnapshotHandle is a unique id returned by the CSI driver to identify a volume
+ // snapshot on the storage system
+ // Required.
+ SnapshotHandle string `json:"snapshotHandle" protobuf:"bytes,2,opt,name=snapshotHandle"`
+}
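
To make the commented-out migration note above concrete, here is a minimal, illustrative sketch of the status conversion the webhook performs: copying v1beta1 `VolumeSnapshotHandlePair` entries into the v1beta2 `VolumeSnapshotInfoList` that replaces them. The helper name is hypothetical, this is not the PR's webhook code, and the v1beta1 field layout is assumed from the comments above.

```go
package main

import (
	"fmt"

	groupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
)

// handlePairsToInfos copies v1beta1 handle pairs into v1beta2
// VolumeSnapshotInfo entries. CreationTime, ReadyToUse, and RestoreSize
// have no v1beta1 counterpart, so they stay nil (unknown).
func handlePairsToInfos(pairs []groupsnapshotv1beta1.VolumeSnapshotHandlePair) []groupsnapshotv1beta2.VolumeSnapshotInfo {
	infos := make([]groupsnapshotv1beta2.VolumeSnapshotInfo, 0, len(pairs))
	for _, p := range pairs {
		infos = append(infos, groupsnapshotv1beta2.VolumeSnapshotInfo{
			VolumeHandle:   p.VolumeHandle,
			SnapshotHandle: p.SnapshotHandle,
		})
	}
	return infos
}

func main() {
	pairs := []groupsnapshotv1beta1.VolumeSnapshotHandlePair{
		{VolumeHandle: "vol-1", SnapshotHandle: "snap-1"},
	}
	fmt.Printf("%+v\n", handlePairsToInfos(pairs))
}
```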
diff --git a/client/apis/volumegroupsnapshot/v1beta2/zz_generated.deepcopy.go b/client/apis/volumegroupsnapshot/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..05729d72a
--- /dev/null
+++ b/client/apis/volumegroupsnapshot/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,466 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupSnapshotHandles) DeepCopyInto(out *GroupSnapshotHandles) {
+ *out = *in
+ if in.VolumeSnapshotHandles != nil {
+ in, out := &in.VolumeSnapshotHandles, &out.VolumeSnapshotHandles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSnapshotHandles.
+func (in *GroupSnapshotHandles) DeepCopy() *GroupSnapshotHandles {
+ if in == nil {
+ return nil
+ }
+ out := new(GroupSnapshotHandles)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshot) DeepCopyInto(out *VolumeGroupSnapshot) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ if in.Status != nil {
+ in, out := &in.Status, &out.Status
+ *out = new(VolumeGroupSnapshotStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshot.
+func (in *VolumeGroupSnapshot) DeepCopy() *VolumeGroupSnapshot {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshot)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshot) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotClass) DeepCopyInto(out *VolumeGroupSnapshotClass) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotClass.
+func (in *VolumeGroupSnapshotClass) DeepCopy() *VolumeGroupSnapshotClass {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotClass)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshotClass) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotClassList) DeepCopyInto(out *VolumeGroupSnapshotClassList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]VolumeGroupSnapshotClass, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotClassList.
+func (in *VolumeGroupSnapshotClassList) DeepCopy() *VolumeGroupSnapshotClassList {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotClassList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshotClassList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotContent) DeepCopyInto(out *VolumeGroupSnapshotContent) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ if in.Status != nil {
+ in, out := &in.Status, &out.Status
+ *out = new(VolumeGroupSnapshotContentStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContent.
+func (in *VolumeGroupSnapshotContent) DeepCopy() *VolumeGroupSnapshotContent {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotContent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshotContent) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotContentList) DeepCopyInto(out *VolumeGroupSnapshotContentList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]VolumeGroupSnapshotContent, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentList.
+func (in *VolumeGroupSnapshotContentList) DeepCopy() *VolumeGroupSnapshotContentList {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotContentList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshotContentList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotContentSource) DeepCopyInto(out *VolumeGroupSnapshotContentSource) {
+ *out = *in
+ if in.VolumeHandles != nil {
+ in, out := &in.VolumeHandles, &out.VolumeHandles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.GroupSnapshotHandles != nil {
+ in, out := &in.GroupSnapshotHandles, &out.GroupSnapshotHandles
+ *out = new(GroupSnapshotHandles)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentSource.
+func (in *VolumeGroupSnapshotContentSource) DeepCopy() *VolumeGroupSnapshotContentSource {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotContentSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotContentSpec) DeepCopyInto(out *VolumeGroupSnapshotContentSpec) {
+ *out = *in
+ out.VolumeGroupSnapshotRef = in.VolumeGroupSnapshotRef
+ if in.VolumeGroupSnapshotClassName != nil {
+ in, out := &in.VolumeGroupSnapshotClassName, &out.VolumeGroupSnapshotClassName
+ *out = new(string)
+ **out = **in
+ }
+ in.Source.DeepCopyInto(&out.Source)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentSpec.
+func (in *VolumeGroupSnapshotContentSpec) DeepCopy() *VolumeGroupSnapshotContentSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotContentSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotContentStatus) DeepCopyInto(out *VolumeGroupSnapshotContentStatus) {
+ *out = *in
+ if in.VolumeGroupSnapshotHandle != nil {
+ in, out := &in.VolumeGroupSnapshotHandle, &out.VolumeGroupSnapshotHandle
+ *out = new(string)
+ **out = **in
+ }
+ if in.CreationTime != nil {
+ in, out := &in.CreationTime, &out.CreationTime
+ *out = (*in).DeepCopy()
+ }
+ if in.ReadyToUse != nil {
+ in, out := &in.ReadyToUse, &out.ReadyToUse
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Error != nil {
+ in, out := &in.Error, &out.Error
+ *out = new(v1.VolumeSnapshotError)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VolumeSnapshotInfoList != nil {
+ in, out := &in.VolumeSnapshotInfoList, &out.VolumeSnapshotInfoList
+ *out = make([]VolumeSnapshotInfo, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotContentStatus.
+func (in *VolumeGroupSnapshotContentStatus) DeepCopy() *VolumeGroupSnapshotContentStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotContentStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotList) DeepCopyInto(out *VolumeGroupSnapshotList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]VolumeGroupSnapshot, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotList.
+func (in *VolumeGroupSnapshotList) DeepCopy() *VolumeGroupSnapshotList {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VolumeGroupSnapshotList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotSource) DeepCopyInto(out *VolumeGroupSnapshotSource) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VolumeGroupSnapshotContentName != nil {
+ in, out := &in.VolumeGroupSnapshotContentName, &out.VolumeGroupSnapshotContentName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotSource.
+func (in *VolumeGroupSnapshotSource) DeepCopy() *VolumeGroupSnapshotSource {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotSpec) DeepCopyInto(out *VolumeGroupSnapshotSpec) {
+ *out = *in
+ in.Source.DeepCopyInto(&out.Source)
+ if in.VolumeGroupSnapshotClassName != nil {
+ in, out := &in.VolumeGroupSnapshotClassName, &out.VolumeGroupSnapshotClassName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotSpec.
+func (in *VolumeGroupSnapshotSpec) DeepCopy() *VolumeGroupSnapshotSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeGroupSnapshotStatus) DeepCopyInto(out *VolumeGroupSnapshotStatus) {
+ *out = *in
+ if in.BoundVolumeGroupSnapshotContentName != nil {
+ in, out := &in.BoundVolumeGroupSnapshotContentName, &out.BoundVolumeGroupSnapshotContentName
+ *out = new(string)
+ **out = **in
+ }
+ if in.CreationTime != nil {
+ in, out := &in.CreationTime, &out.CreationTime
+ *out = (*in).DeepCopy()
+ }
+ if in.ReadyToUse != nil {
+ in, out := &in.ReadyToUse, &out.ReadyToUse
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Error != nil {
+ in, out := &in.Error, &out.Error
+ *out = new(v1.VolumeSnapshotError)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeGroupSnapshotStatus.
+func (in *VolumeGroupSnapshotStatus) DeepCopy() *VolumeGroupSnapshotStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeGroupSnapshotStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeSnapshotHandlePair) DeepCopyInto(out *VolumeSnapshotHandlePair) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotHandlePair.
+func (in *VolumeSnapshotHandlePair) DeepCopy() *VolumeSnapshotHandlePair {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeSnapshotHandlePair)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeSnapshotInfo) DeepCopyInto(out *VolumeSnapshotInfo) {
+ *out = *in
+ if in.CreationTime != nil {
+ in, out := &in.CreationTime, &out.CreationTime
+ *out = new(int64)
+ **out = **in
+ }
+ if in.ReadyToUse != nil {
+ in, out := &in.ReadyToUse, &out.ReadyToUse
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RestoreSize != nil {
+ in, out := &in.RestoreSize, &out.RestoreSize
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotInfo.
+func (in *VolumeSnapshotInfo) DeepCopy() *VolumeSnapshotInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(VolumeSnapshotInfo)
+ in.DeepCopyInto(out)
+ return out
+}
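// Editor's note: illustrative sketch, not part of the generated diff.
// It demonstrates the aliasing guarantee the deepcopy-gen helpers in the
// file above provide: a DeepCopy result shares no slice, map, or pointer
// storage with its source, so mutating the copy leaves the original intact.
package main

import (
	"fmt"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
)

func main() {
	orig := &v1beta2.GroupSnapshotHandles{
		VolumeSnapshotHandles: []string{"snap-a", "snap-b"},
	}
	cp := orig.DeepCopy()
	cp.VolumeSnapshotHandles[0] = "mutated"

	// DeepCopyInto allocated a fresh slice for the copy, so the
	// original backing array is untouched.
	fmt.Println(orig.VolumeSnapshotHandles[0]) // prints "snap-a"
}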
diff --git a/client/clientset/versioned/clientset.go b/client/clientset/versioned/clientset.go
index 050de3b87..9a165783e 100644
--- a/client/clientset/versioned/clientset.go
+++ b/client/clientset/versioned/clientset.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@ import (
"net/http"
groupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta1"
+ groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumesnapshot/v1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
@@ -32,6 +33,7 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
GroupsnapshotV1beta1() groupsnapshotv1beta1.GroupsnapshotV1beta1Interface
+ GroupsnapshotV1beta2() groupsnapshotv1beta2.GroupsnapshotV1beta2Interface
SnapshotV1() snapshotv1.SnapshotV1Interface
}
@@ -39,6 +41,7 @@ type Interface interface {
type Clientset struct {
*discovery.DiscoveryClient
groupsnapshotV1beta1 *groupsnapshotv1beta1.GroupsnapshotV1beta1Client
+ groupsnapshotV1beta2 *groupsnapshotv1beta2.GroupsnapshotV1beta2Client
snapshotV1 *snapshotv1.SnapshotV1Client
}
@@ -47,6 +50,11 @@ func (c *Clientset) GroupsnapshotV1beta1() groupsnapshotv1beta1.GroupsnapshotV1b
return c.groupsnapshotV1beta1
}
+// GroupsnapshotV1beta2 retrieves the GroupsnapshotV1beta2Client
+func (c *Clientset) GroupsnapshotV1beta2() groupsnapshotv1beta2.GroupsnapshotV1beta2Interface {
+ return c.groupsnapshotV1beta2
+}
+
// SnapshotV1 retrieves the SnapshotV1Client
func (c *Clientset) SnapshotV1() snapshotv1.SnapshotV1Interface {
return c.snapshotV1
@@ -100,6 +108,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
+ cs.groupsnapshotV1beta2, err = groupsnapshotv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
cs.snapshotV1, err = snapshotv1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -126,6 +138,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.groupsnapshotV1beta1 = groupsnapshotv1beta1.New(c)
+ cs.groupsnapshotV1beta2 = groupsnapshotv1beta2.New(c)
cs.snapshotV1 = snapshotv1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
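// Editor's note: illustrative sketch, not part of the generated diff.
// With the wiring added above, callers reach the new v1beta2 group through
// the same aggregated clientset as v1beta1. In-cluster config is an
// assumption here; kubeconfig-based config works identically.
package main

import (
	"context"
	"fmt"

	clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// GroupsnapshotV1beta2() is the accessor added in this change,
	// alongside the existing GroupsnapshotV1beta1().
	vgsList, err := cs.GroupsnapshotV1beta2().
		VolumeGroupSnapshots("default").
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, vgs := range vgsList.Items {
		fmt.Println(vgs.Name)
	}
}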
diff --git a/client/clientset/versioned/fake/clientset_generated.go b/client/clientset/versioned/fake/clientset_generated.go
index 637c89f34..600753ecb 100644
--- a/client/clientset/versioned/fake/clientset_generated.go
+++ b/client/clientset/versioned/fake/clientset_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,6 +22,8 @@ import (
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
groupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta1"
fakegroupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta1/fake"
+ groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
+ fakegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumesnapshot/v1"
fakesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumesnapshot/v1/fake"
"k8s.io/apimachinery/pkg/runtime"
@@ -86,6 +88,11 @@ func (c *Clientset) GroupsnapshotV1beta1() groupsnapshotv1beta1.GroupsnapshotV1b
return &fakegroupsnapshotv1beta1.FakeGroupsnapshotV1beta1{Fake: &c.Fake}
}
+// GroupsnapshotV1beta2 retrieves the GroupsnapshotV1beta2Client
+func (c *Clientset) GroupsnapshotV1beta2() groupsnapshotv1beta2.GroupsnapshotV1beta2Interface {
+ return &fakegroupsnapshotv1beta2.FakeGroupsnapshotV1beta2{Fake: &c.Fake}
+}
+
// SnapshotV1 retrieves the SnapshotV1Client
func (c *Clientset) SnapshotV1() snapshotv1.SnapshotV1Interface {
return &fakesnapshotv1.FakeSnapshotV1{Fake: &c.Fake}
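// Editor's note: illustrative test sketch, not part of the generated diff.
// The fake accessor above lets unit tests exercise the v1beta2 API without
// an API server; NewSimpleClientset seeds the fake's object tracker.
// The package name is hypothetical.
package example_test

import (
	"context"
	"testing"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	"github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetSeededVolumeGroupSnapshot(t *testing.T) {
	seed := &v1beta2.VolumeGroupSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "vgs-1", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.GroupsnapshotV1beta2().
		VolumeGroupSnapshots("default").
		Get(context.TODO(), "vgs-1", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Get: %v", err)
	}
	if got.Name != "vgs-1" {
		t.Fatalf("got %q, want %q", got.Name, "vgs-1")
	}
}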
diff --git a/client/clientset/versioned/fake/doc.go b/client/clientset/versioned/fake/doc.go
index c8342d4bd..ac8c1ec9e 100644
--- a/client/clientset/versioned/fake/doc.go
+++ b/client/clientset/versioned/fake/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/client/clientset/versioned/fake/register.go b/client/clientset/versioned/fake/register.go
index c1e1e26d0..7f508b6e5 100644
--- a/client/clientset/versioned/fake/register.go
+++ b/client/clientset/versioned/fake/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ package fake
import (
groupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -33,6 +34,7 @@ var codecs = serializer.NewCodecFactory(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
groupsnapshotv1beta1.AddToScheme,
+ groupsnapshotv1beta2.AddToScheme,
snapshotv1.AddToScheme,
}
diff --git a/client/clientset/versioned/scheme/doc.go b/client/clientset/versioned/scheme/doc.go
index 0fe2de913..9e531af56 100644
--- a/client/clientset/versioned/scheme/doc.go
+++ b/client/clientset/versioned/scheme/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/client/clientset/versioned/scheme/register.go b/client/clientset/versioned/scheme/register.go
index 6f8c4fdf0..573d7bcc9 100644
--- a/client/clientset/versioned/scheme/register.go
+++ b/client/clientset/versioned/scheme/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ package scheme
import (
groupsnapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -33,6 +34,7 @@ var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
groupsnapshotv1beta1.AddToScheme,
+ groupsnapshotv1beta2.AddToScheme,
snapshotv1.AddToScheme,
}
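// Editor's note: illustrative sketch, not part of the generated diff.
// Adding groupsnapshotv1beta2.AddToScheme to the scheme builder above is
// what lets codecs, informers, and similar machinery round-trip the new
// kinds; the same call registers them in a caller-owned scheme.
package main

import (
	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()
	if err := groupsnapshotv1beta2.AddToScheme(s); err != nil {
		panic(err)
	}
	// s now recognizes VolumeGroupSnapshot, VolumeGroupSnapshotClass, and
	// VolumeGroupSnapshotContent at groupsnapshot.storage.k8s.io/v1beta2.
}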
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/doc.go
similarity index 82%
rename from vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go
rename to client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/doc.go
index df12a463d..705324605 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,8 +16,5 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
-
-type IPAddressExpansion interface{}
-
-type ServiceCIDRExpansion interface{}
+// This package has the automatically generated typed clients.
+package v1beta2
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/doc.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/doc.go
new file mode 100644
index 000000000..18420d452
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot.go
new file mode 100644
index 000000000..b831eb560
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVolumeGroupSnapshots implements VolumeGroupSnapshotInterface
+type FakeVolumeGroupSnapshots struct {
+ Fake *FakeGroupsnapshotV1beta2
+ ns string
+}
+
+var volumegroupsnapshotsResource = v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshots")
+
+var volumegroupsnapshotsKind = v1beta2.SchemeGroupVersion.WithKind("VolumeGroupSnapshot")
+
+// Get takes name of the volumeGroupSnapshot, and returns the corresponding volumeGroupSnapshot object, and an error if there is any.
+func (c *FakeVolumeGroupSnapshots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(volumegroupsnapshotsResource, c.ns, name), &v1beta2.VolumeGroupSnapshot{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), err
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshots that match those selectors.
+func (c *FakeVolumeGroupSnapshots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(volumegroupsnapshotsResource, volumegroupsnapshotsKind, c.ns, opts), &v1beta2.VolumeGroupSnapshotList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.VolumeGroupSnapshotList{ListMeta: obj.(*v1beta2.VolumeGroupSnapshotList).ListMeta}
+ for _, item := range obj.(*v1beta2.VolumeGroupSnapshotList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshots.
+func (c *FakeVolumeGroupSnapshots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(volumegroupsnapshotsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a volumeGroupSnapshot and creates it. Returns the server's representation of the volumeGroupSnapshot, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshots) Create(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(volumegroupsnapshotsResource, c.ns, volumeGroupSnapshot), &v1beta2.VolumeGroupSnapshot{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), err
+}
+
+// Update takes the representation of a volumeGroupSnapshot and updates it. Returns the server's representation of the volumeGroupSnapshot, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshots) Update(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(volumegroupsnapshotsResource, c.ns, volumeGroupSnapshot), &v1beta2.VolumeGroupSnapshot{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeVolumeGroupSnapshots) UpdateStatus(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshot, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(volumegroupsnapshotsResource, "status", c.ns, volumeGroupSnapshot), &v1beta2.VolumeGroupSnapshot{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), err
+}
+
+// Delete takes name of the volumeGroupSnapshot and deletes it. Returns an error if one occurs.
+func (c *FakeVolumeGroupSnapshots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(volumegroupsnapshotsResource, c.ns, name, opts), &v1beta2.VolumeGroupSnapshot{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVolumeGroupSnapshots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(volumegroupsnapshotsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.VolumeGroupSnapshotList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshot.
+func (c *FakeVolumeGroupSnapshots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(volumegroupsnapshotsResource, c.ns, name, pt, data, subresources...), &v1beta2.VolumeGroupSnapshot{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), err
+}
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot_client.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot_client.go
new file mode 100644
index 000000000..a9a0fa4c3
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshot_client.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeGroupsnapshotV1beta2 struct {
+ *testing.Fake
+}
+
+func (c *FakeGroupsnapshotV1beta2) VolumeGroupSnapshots(namespace string) v1beta2.VolumeGroupSnapshotInterface {
+ return &FakeVolumeGroupSnapshots{c, namespace}
+}
+
+func (c *FakeGroupsnapshotV1beta2) VolumeGroupSnapshotClasses() v1beta2.VolumeGroupSnapshotClassInterface {
+ return &FakeVolumeGroupSnapshotClasses{c}
+}
+
+func (c *FakeGroupsnapshotV1beta2) VolumeGroupSnapshotContents() v1beta2.VolumeGroupSnapshotContentInterface {
+ return &FakeVolumeGroupSnapshotContents{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *FakeGroupsnapshotV1beta2) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotclass.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotclass.go
new file mode 100644
index 000000000..499575d42
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotclass.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVolumeGroupSnapshotClasses implements VolumeGroupSnapshotClassInterface
+type FakeVolumeGroupSnapshotClasses struct {
+ Fake *FakeGroupsnapshotV1beta2
+}
+
+var volumegroupsnapshotclassesResource = v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshotclasses")
+
+var volumegroupsnapshotclassesKind = v1beta2.SchemeGroupVersion.WithKind("VolumeGroupSnapshotClass")
+
+// Get takes name of the volumeGroupSnapshotClass, and returns the corresponding volumeGroupSnapshotClass object, and an error if there is any.
+func (c *FakeVolumeGroupSnapshotClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(volumegroupsnapshotclassesResource, name), &v1beta2.VolumeGroupSnapshotClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotClass), err
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshotClasses that match those selectors.
+func (c *FakeVolumeGroupSnapshotClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(volumegroupsnapshotclassesResource, volumegroupsnapshotclassesKind, opts), &v1beta2.VolumeGroupSnapshotClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.VolumeGroupSnapshotClassList{ListMeta: obj.(*v1beta2.VolumeGroupSnapshotClassList).ListMeta}
+ for _, item := range obj.(*v1beta2.VolumeGroupSnapshotClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshotClasses.
+func (c *FakeVolumeGroupSnapshotClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(volumegroupsnapshotclassesResource, opts))
+}
+
+// Create takes the representation of a volumeGroupSnapshotClass and creates it. Returns the server's representation of the volumeGroupSnapshotClass, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshotClasses) Create(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(volumegroupsnapshotclassesResource, volumeGroupSnapshotClass), &v1beta2.VolumeGroupSnapshotClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotClass), err
+}
+
+// Update takes the representation of a volumeGroupSnapshotClass and updates it. Returns the server's representation of the volumeGroupSnapshotClass, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshotClasses) Update(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(volumegroupsnapshotclassesResource, volumeGroupSnapshotClass), &v1beta2.VolumeGroupSnapshotClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotClass), err
+}
+
+// Delete takes name of the volumeGroupSnapshotClass and deletes it. Returns an error if one occurs.
+func (c *FakeVolumeGroupSnapshotClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(volumegroupsnapshotclassesResource, name, opts), &v1beta2.VolumeGroupSnapshotClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVolumeGroupSnapshotClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(volumegroupsnapshotclassesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.VolumeGroupSnapshotClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshotClass.
+func (c *FakeVolumeGroupSnapshotClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(volumegroupsnapshotclassesResource, name, pt, data, subresources...), &v1beta2.VolumeGroupSnapshotClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotClass), err
+}
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotcontent.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotcontent.go
new file mode 100644
index 000000000..26e1fb533
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/fake/fake_volumegroupsnapshotcontent.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVolumeGroupSnapshotContents implements VolumeGroupSnapshotContentInterface
+type FakeVolumeGroupSnapshotContents struct {
+ Fake *FakeGroupsnapshotV1beta2
+}
+
+var volumegroupsnapshotcontentsResource = v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshotcontents")
+
+var volumegroupsnapshotcontentsKind = v1beta2.SchemeGroupVersion.WithKind("VolumeGroupSnapshotContent")
+
+// Get takes name of the volumeGroupSnapshotContent, and returns the corresponding volumeGroupSnapshotContent object, and an error if there is any.
+func (c *FakeVolumeGroupSnapshotContents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(volumegroupsnapshotcontentsResource, name), &v1beta2.VolumeGroupSnapshotContent{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), err
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshotContents that match those selectors.
+func (c *FakeVolumeGroupSnapshotContents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotContentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(volumegroupsnapshotcontentsResource, volumegroupsnapshotcontentsKind, opts), &v1beta2.VolumeGroupSnapshotContentList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.VolumeGroupSnapshotContentList{ListMeta: obj.(*v1beta2.VolumeGroupSnapshotContentList).ListMeta}
+ for _, item := range obj.(*v1beta2.VolumeGroupSnapshotContentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshotContents.
+func (c *FakeVolumeGroupSnapshotContents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(volumegroupsnapshotcontentsResource, opts))
+}
+
+// Create takes the representation of a volumeGroupSnapshotContent and creates it. Returns the server's representation of the volumeGroupSnapshotContent, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshotContents) Create(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(volumegroupsnapshotcontentsResource, volumeGroupSnapshotContent), &v1beta2.VolumeGroupSnapshotContent{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), err
+}
+
+// Update takes the representation of a volumeGroupSnapshotContent and updates it. Returns the server's representation of the volumeGroupSnapshotContent, and an error, if there is any.
+func (c *FakeVolumeGroupSnapshotContents) Update(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(volumegroupsnapshotcontentsResource, volumeGroupSnapshotContent), &v1beta2.VolumeGroupSnapshotContent{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeVolumeGroupSnapshotContents) UpdateStatus(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshotContent, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(volumegroupsnapshotcontentsResource, "status", volumeGroupSnapshotContent), &v1beta2.VolumeGroupSnapshotContent{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), err
+}
+
+// Delete takes name of the volumeGroupSnapshotContent and deletes it. Returns an error if one occurs.
+func (c *FakeVolumeGroupSnapshotContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(volumegroupsnapshotcontentsResource, name, opts), &v1beta2.VolumeGroupSnapshotContent{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVolumeGroupSnapshotContents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(volumegroupsnapshotcontentsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.VolumeGroupSnapshotContentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshotContent.
+func (c *FakeVolumeGroupSnapshotContents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(volumegroupsnapshotcontentsResource, name, pt, data, subresources...), &v1beta2.VolumeGroupSnapshotContent{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), err
+}
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/generated_expansion.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/generated_expansion.go
new file mode 100644
index 000000000..ed8318ed6
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/generated_expansion.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+type VolumeGroupSnapshotExpansion interface{}
+
+type VolumeGroupSnapshotClassExpansion interface{}
+
+type VolumeGroupSnapshotContentExpansion interface{}
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
new file mode 100644
index 000000000..a9b7a8f64
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ "time"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ scheme "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// VolumeGroupSnapshotsGetter has a method to return a VolumeGroupSnapshotInterface.
+// A group's client should implement this interface.
+type VolumeGroupSnapshotsGetter interface {
+ VolumeGroupSnapshots(namespace string) VolumeGroupSnapshotInterface
+}
+
+// VolumeGroupSnapshotInterface has methods to work with VolumeGroupSnapshot resources.
+type VolumeGroupSnapshotInterface interface {
+ Create(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.CreateOptions) (*v1beta2.VolumeGroupSnapshot, error)
+ Update(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshot, error)
+ UpdateStatus(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshot, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.VolumeGroupSnapshot, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta2.VolumeGroupSnapshotList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshot, err error)
+ VolumeGroupSnapshotExpansion
+}
+
+// volumeGroupSnapshots implements VolumeGroupSnapshotInterface
+type volumeGroupSnapshots struct {
+ client rest.Interface
+ ns string
+}
+
+// newVolumeGroupSnapshots returns a VolumeGroupSnapshots
+func newVolumeGroupSnapshots(c *GroupsnapshotV1beta2Client, namespace string) *volumeGroupSnapshots {
+ return &volumeGroupSnapshots{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the volumeGroupSnapshot, and returns the corresponding volumeGroupSnapshot object, and an error if there is any.
+func (c *volumeGroupSnapshots) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ result = &v1beta2.VolumeGroupSnapshot{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshots that match those selectors.
+func (c *volumeGroupSnapshots) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta2.VolumeGroupSnapshotList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshots.
+func (c *volumeGroupSnapshots) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a volumeGroupSnapshot and creates it. Returns the server's representation of the volumeGroupSnapshot, and an error, if there is any.
+func (c *volumeGroupSnapshots) Create(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ result = &v1beta2.VolumeGroupSnapshot{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a volumeGroupSnapshot and updates it. Returns the server's representation of the volumeGroupSnapshot, and an error, if there is any.
+func (c *volumeGroupSnapshots) Update(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ result = &v1beta2.VolumeGroupSnapshot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ Name(volumeGroupSnapshot.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *volumeGroupSnapshots) UpdateStatus(ctx context.Context, volumeGroupSnapshot *v1beta2.VolumeGroupSnapshot, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ result = &v1beta2.VolumeGroupSnapshot{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ Name(volumeGroupSnapshot.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshot).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the volumeGroupSnapshot and deletes it. Returns an error if one occurs.
+func (c *volumeGroupSnapshots) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeGroupSnapshots) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshot.
+func (c *volumeGroupSnapshots) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshot, err error) {
+ result = &v1beta2.VolumeGroupSnapshot{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("volumegroupsnapshots").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
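// Editor's note: illustrative sketch, not part of the generated diff.
// The Watch method above returns a standard watch.Interface, so v1beta2
// group snapshots are consumed like any other Kubernetes resource. The
// package name is hypothetical; the client is assumed to be built elsewhere.
package v1beta2example

import (
	"context"
	"fmt"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	clientv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func watchGroupSnapshots(ctx context.Context, c clientv1beta2.GroupsnapshotV1beta2Interface) error {
	w, err := c.VolumeGroupSnapshots("default").
		Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		// Each event carries the changed *v1beta2.VolumeGroupSnapshot.
		if vgs, ok := ev.Object.(*v1beta2.VolumeGroupSnapshot); ok {
			fmt.Println(ev.Type, vgs.Name)
		}
	}
	return nil
}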
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot_client.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot_client.go
new file mode 100644
index 000000000..500229c0f
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshot_client.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "net/http"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type GroupsnapshotV1beta2Interface interface {
+ RESTClient() rest.Interface
+ VolumeGroupSnapshotsGetter
+ VolumeGroupSnapshotClassesGetter
+ VolumeGroupSnapshotContentsGetter
+}
+
+// GroupsnapshotV1beta2Client is used to interact with features provided by the groupsnapshot.storage.k8s.io group.
+type GroupsnapshotV1beta2Client struct {
+ restClient rest.Interface
+}
+
+func (c *GroupsnapshotV1beta2Client) VolumeGroupSnapshots(namespace string) VolumeGroupSnapshotInterface {
+ return newVolumeGroupSnapshots(c, namespace)
+}
+
+func (c *GroupsnapshotV1beta2Client) VolumeGroupSnapshotClasses() VolumeGroupSnapshotClassInterface {
+ return newVolumeGroupSnapshotClasses(c)
+}
+
+func (c *GroupsnapshotV1beta2Client) VolumeGroupSnapshotContents() VolumeGroupSnapshotContentInterface {
+ return newVolumeGroupSnapshotContents(c)
+}
+
+// NewForConfig creates a new GroupsnapshotV1beta2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*GroupsnapshotV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new GroupsnapshotV1beta2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*GroupsnapshotV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &GroupsnapshotV1beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new GroupsnapshotV1beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *GroupsnapshotV1beta2Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new GroupsnapshotV1beta2Client for the given RESTClient.
+func New(c rest.Interface) *GroupsnapshotV1beta2Client {
+ return &GroupsnapshotV1beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta2.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with the API server by this client implementation.
+func (c *GroupsnapshotV1beta2Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
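// Editor's note: illustrative sketch, not part of the generated diff.
// When only this API group is needed, the standalone client above can be
// built straight from a rest.Config via NewForConfig; the kubeconfig path
// below is a hypothetical placeholder.
package main

import (
	"context"
	"fmt"

	clientv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfigPath := "/path/to/kubeconfig" // hypothetical
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		panic(err)
	}
	c, err := clientv1beta2.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	classes, err := c.VolumeGroupSnapshotClasses().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, class := range classes.Items {
		fmt.Println(class.Name)
	}
}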
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
new file mode 100644
index 000000000..e03c85cfb
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ "time"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ scheme "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// VolumeGroupSnapshotClassesGetter has a method to return a VolumeGroupSnapshotClassInterface.
+// A group's client should implement this interface.
+type VolumeGroupSnapshotClassesGetter interface {
+ VolumeGroupSnapshotClasses() VolumeGroupSnapshotClassInterface
+}
+
+// VolumeGroupSnapshotClassInterface has methods to work with VolumeGroupSnapshotClass resources.
+type VolumeGroupSnapshotClassInterface interface {
+ Create(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.CreateOptions) (*v1beta2.VolumeGroupSnapshotClass, error)
+ Update(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshotClass, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.VolumeGroupSnapshotClass, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta2.VolumeGroupSnapshotClassList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotClass, err error)
+ VolumeGroupSnapshotClassExpansion
+}
+
+// volumeGroupSnapshotClasses implements VolumeGroupSnapshotClassInterface
+type volumeGroupSnapshotClasses struct {
+ client rest.Interface
+}
+
+// newVolumeGroupSnapshotClasses returns a VolumeGroupSnapshotClasses
+func newVolumeGroupSnapshotClasses(c *GroupsnapshotV1beta2Client) *volumeGroupSnapshotClasses {
+ return &volumeGroupSnapshotClasses{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the volumeGroupSnapshotClass, and returns the corresponding volumeGroupSnapshotClass object, and an error if there is any.
+func (c *volumeGroupSnapshotClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ result = &v1beta2.VolumeGroupSnapshotClass{}
+ err = c.client.Get().
+ Resource("volumegroupsnapshotclasses").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshotClasses that match those selectors.
+func (c *volumeGroupSnapshotClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotClassList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta2.VolumeGroupSnapshotClassList{}
+ err = c.client.Get().
+ Resource("volumegroupsnapshotclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshotClasses.
+func (c *volumeGroupSnapshotClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("volumegroupsnapshotclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a volumeGroupSnapshotClass and creates it. Returns the server's representation of the volumeGroupSnapshotClass, and an error, if there is any.
+func (c *volumeGroupSnapshotClasses) Create(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ result = &v1beta2.VolumeGroupSnapshotClass{}
+ err = c.client.Post().
+ Resource("volumegroupsnapshotclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshotClass).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a volumeGroupSnapshotClass and updates it. Returns the server's representation of the volumeGroupSnapshotClass, and an error, if there is any.
+func (c *volumeGroupSnapshotClasses) Update(ctx context.Context, volumeGroupSnapshotClass *v1beta2.VolumeGroupSnapshotClass, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ result = &v1beta2.VolumeGroupSnapshotClass{}
+ err = c.client.Put().
+ Resource("volumegroupsnapshotclasses").
+ Name(volumeGroupSnapshotClass.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshotClass).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the volumeGroupSnapshotClass and deletes it. Returns an error if one occurs.
+func (c *volumeGroupSnapshotClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("volumegroupsnapshotclasses").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeGroupSnapshotClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("volumegroupsnapshotclasses").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshotClass.
+func (c *volumeGroupSnapshotClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotClass, err error) {
+ result = &v1beta2.VolumeGroupSnapshotClass{}
+ err = c.client.Patch(pt).
+ Resource("volumegroupsnapshotclasses").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
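
Most of the VolumeGroupSnapshotClass spec is immutable in v1beta2 (see the CEL rules in the CRD changes below), but metadata can still change. A hedged sketch of using the generated `Patch` method above to label an existing class; the class name and label value are placeholders:

```go
package main

import (
	"context"
	"fmt"

	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// labelClass adds a label to an existing VolumeGroupSnapshotClass via a JSON
// merge patch. Only metadata is touched: driver, deletionPolicy and parameters
// are immutable under the v1beta2 CEL rules, so patching them would be rejected.
func labelClass(ctx context.Context, client *groupsnapshotv1beta2.GroupsnapshotV1beta2Client, name string) error {
	patch := []byte(`{"metadata":{"labels":{"environment":"test"}}}`) // placeholder label
	class, err := client.VolumeGroupSnapshotClasses().Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("patched %s, labels: %v\n", class.Name, class.Labels)
	return nil
}
```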
diff --git a/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
new file mode 100644
index 000000000..3168991fc
--- /dev/null
+++ b/client/clientset/versioned/typed/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ "time"
+
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ scheme "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// VolumeGroupSnapshotContentsGetter has a method to return a VolumeGroupSnapshotContentInterface.
+// A group's client should implement this interface.
+type VolumeGroupSnapshotContentsGetter interface {
+ VolumeGroupSnapshotContents() VolumeGroupSnapshotContentInterface
+}
+
+// VolumeGroupSnapshotContentInterface has methods to work with VolumeGroupSnapshotContent resources.
+type VolumeGroupSnapshotContentInterface interface {
+ Create(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.CreateOptions) (*v1beta2.VolumeGroupSnapshotContent, error)
+ Update(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshotContent, error)
+ UpdateStatus(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (*v1beta2.VolumeGroupSnapshotContent, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.VolumeGroupSnapshotContent, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta2.VolumeGroupSnapshotContentList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotContent, err error)
+ VolumeGroupSnapshotContentExpansion
+}
+
+// volumeGroupSnapshotContents implements VolumeGroupSnapshotContentInterface
+type volumeGroupSnapshotContents struct {
+ client rest.Interface
+}
+
+// newVolumeGroupSnapshotContents returns a VolumeGroupSnapshotContents
+func newVolumeGroupSnapshotContents(c *GroupsnapshotV1beta2Client) *volumeGroupSnapshotContents {
+ return &volumeGroupSnapshotContents{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the volumeGroupSnapshotContent, and returns the corresponding volumeGroupSnapshotContent object, and an error if there is any.
+func (c *volumeGroupSnapshotContents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ result = &v1beta2.VolumeGroupSnapshotContent{}
+ err = c.client.Get().
+ Resource("volumegroupsnapshotcontents").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of VolumeGroupSnapshotContents that match those selectors.
+func (c *volumeGroupSnapshotContents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.VolumeGroupSnapshotContentList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta2.VolumeGroupSnapshotContentList{}
+ err = c.client.Get().
+ Resource("volumegroupsnapshotcontents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested volumeGroupSnapshotContents.
+func (c *volumeGroupSnapshotContents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("volumegroupsnapshotcontents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a volumeGroupSnapshotContent and creates it. Returns the server's representation of the volumeGroupSnapshotContent, and an error, if there is any.
+func (c *volumeGroupSnapshotContents) Create(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.CreateOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ result = &v1beta2.VolumeGroupSnapshotContent{}
+ err = c.client.Post().
+ Resource("volumegroupsnapshotcontents").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshotContent).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a volumeGroupSnapshotContent and updates it. Returns the server's representation of the volumeGroupSnapshotContent, and an error, if there is any.
+func (c *volumeGroupSnapshotContents) Update(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ result = &v1beta2.VolumeGroupSnapshotContent{}
+ err = c.client.Put().
+ Resource("volumegroupsnapshotcontents").
+ Name(volumeGroupSnapshotContent.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshotContent).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *volumeGroupSnapshotContents) UpdateStatus(ctx context.Context, volumeGroupSnapshotContent *v1beta2.VolumeGroupSnapshotContent, opts v1.UpdateOptions) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ result = &v1beta2.VolumeGroupSnapshotContent{}
+ err = c.client.Put().
+ Resource("volumegroupsnapshotcontents").
+ Name(volumeGroupSnapshotContent.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(volumeGroupSnapshotContent).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the volumeGroupSnapshotContent and deletes it. Returns an error if one occurs.
+func (c *volumeGroupSnapshotContents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("volumegroupsnapshotcontents").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *volumeGroupSnapshotContents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("volumegroupsnapshotcontents").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched volumeGroupSnapshotContent.
+func (c *volumeGroupSnapshotContents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.VolumeGroupSnapshotContent, err error) {
+ result = &v1beta2.VolumeGroupSnapshotContent{}
+ err = c.client.Patch(pt).
+ Resource("volumegroupsnapshotcontents").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
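
A hedged sketch of consuming the generated `Watch` method above for the cluster-scoped contents resource; the generated code sets `opts.Watch = true` and forwards `TimeoutSeconds` to the server, so the loop ends when the server closes the watch:

```go
package main

import (
	"context"
	"fmt"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// watchContents streams add/update/delete events for the cluster-scoped
// VolumeGroupSnapshotContent resource until the server closes the watch.
func watchContents(ctx context.Context, client *groupsnapshotv1beta2.GroupsnapshotV1beta2Client) error {
	w, err := client.VolumeGroupSnapshotContents().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		content, ok := event.Object.(*v1beta2.VolumeGroupSnapshotContent)
		if !ok {
			continue // e.g. a *metav1.Status carrying a watch error
		}
		fmt.Printf("%s %s\n", event.Type, content.Name)
	}
	return nil
}
```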
diff --git a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml
index e552f81b5..81299d5ec 100644
--- a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml
+++ b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1337"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io
spec:
@@ -31,6 +31,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
+ deprecated: true
name: v1beta1
schema:
openAPIV3Schema:
@@ -90,5 +91,88 @@ spec:
- driver
type: object
served: true
+ storage: false
+ subresources: {}
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeGroupSnapshotContent created through
+ the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot
+ is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ VolumeGroupSnapshotClass specifies parameters that an underlying storage system
+ uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
+ is used by specifying its name in a VolumeGroupSnapshot object.
+ VolumeGroupSnapshotClasses are non-namespaced.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ deletionPolicy:
+ description: |-
+ DeletionPolicy determines whether a VolumeGroupSnapshotContent created
+ through the VolumeGroupSnapshotClass should be deleted when its bound
+ VolumeGroupSnapshot is deleted.
+ Supported values are "Retain" and "Delete".
+ "Retain" means that the VolumeGroupSnapshotContent and its physical group
+ snapshot on underlying storage system are kept.
+ "Delete" means that the VolumeGroupSnapshotContent and its physical group
+ snapshot on underlying storage system are deleted.
+ Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ x-kubernetes-validations:
+ - message: deletionPolicy is immutable once set
+ rule: self == oldSelf
+ driver:
+ description: |-
+ Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
+ Required.
+ type: string
+ x-kubernetes-validations:
+ - message: driver is immutable once set
+ rule: self == oldSelf
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters is a key-value map with storage driver specific parameters for
+ creating group snapshots.
+ These values are opaque to Kubernetes and are passed directly to the driver.
+ type: object
+ x-kubernetes-validations:
+ - message: parameters are immutable once set
+ rule: self == oldSelf
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ served: true
storage: true
subresources: {}
diff --git a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml
index a6d15d8ad..235198ac4 100644
--- a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml
+++ b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1337"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io
spec:
@@ -17,6 +17,15 @@ spec:
- vgscs
singular: volumegroupsnapshotcontent
scope: Cluster
+ conversion:
+ strategy: Webhook
+ webhook:
+ conversionReviewVersions: ["v1"]
+ clientConfig:
+ service:
+ namespace: default
+ name: snapshot-conversion-webhook-service
+ path: /convert
versions:
- additionalPrinterColumns:
- description: Indicates if all the individual snapshots in the group are ready
@@ -53,6 +62,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
+ deprecated: true
name: v1beta1
schema:
openAPIV3Schema:
@@ -319,6 +329,333 @@ spec:
- spec
type: object
served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicates if all the individual snapshots in the group are ready
+ to be used to restore a group of volumes.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Determines whether this VolumeGroupSnapshotContent and its physical
+ group snapshot on the underlying storage system should be deleted when its
+ bound VolumeGroupSnapshot is deleted.
+ jsonPath: .spec.deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - description: Name of the CSI driver used to create the physical group snapshot
+ on the underlying storage system.
+ jsonPath: .spec.driver
+ name: Driver
+ type: string
+ - description: Name of the VolumeGroupSnapshotClass from which this group snapshot
+ was (or will be) created.
+ jsonPath: .spec.volumeGroupSnapshotClassName
+ name: VolumeGroupSnapshotClass
+ type: string
+ - description: Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
+ object is bound.
+ jsonPath: .spec.volumeGroupSnapshotRef.namespace
+ name: VolumeGroupSnapshotNamespace
+ type: string
+ - description: Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
+ object is bound.
+ jsonPath: .spec.volumeGroupSnapshotRef.name
+ name: VolumeGroupSnapshot
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object
+ in the underlying storage system
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system.
+ Required.
+ properties:
+ deletionPolicy:
+ description: |-
+ DeletionPolicy determines whether this VolumeGroupSnapshotContent and the
+ physical group snapshot on the underlying storage system should be deleted
+ when the bound VolumeGroupSnapshot is deleted.
+ Supported values are "Retain" and "Delete".
+ "Retain" means that the VolumeGroupSnapshotContent and its physical group
+ snapshot on the underlying storage system are kept.
+ "Delete" means that the VolumeGroupSnapshotContent and its physical group
+ snapshot on the underlying storage system are deleted.
+ For dynamically provisioned group snapshots, this field will automatically
+ be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field
+ defined in the corresponding VolumeGroupSnapshotClass.
+ For pre-existing snapshots, users MUST specify this field when creating the
+ VolumeGroupSnapshotContent object.
+ Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: |-
+ Driver is the name of the CSI driver used to create the physical group snapshot on
+ the underlying storage system.
+ This MUST be the same as the name returned by the CSI GetPluginName() call for
+ that driver.
+ Required.
+ type: string
+ x-kubernetes-validations:
+ - message: driver is immutable once set
+ rule: self == oldSelf
+ source:
+ description: |-
+ Source specifies whether the snapshot is (or should be) dynamically provisioned
+ or already exists, and just requires a Kubernetes object representation.
+ This field is immutable after creation.
+ Required.
+ properties:
+ groupSnapshotHandles:
+ description: |-
+ GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing
+ group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots
+ on the underlying storage system for which a Kubernetes object
+ representation was (or should be) created.
+ This field is immutable.
+ properties:
+ volumeGroupSnapshotHandle:
+ description: |-
+ VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing
+ group snapshot on the underlying storage system for which a Kubernetes object
+ representation was (or should be) created.
+ This field is immutable.
+ Required.
+ type: string
+ volumeSnapshotHandles:
+ description: |-
+ VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing
+ snapshots on the underlying storage system for which Kubernetes object
+ representations were (or should be) created.
+ This field is immutable.
+ Required.
+ items:
+ type: string
+ type: array
+ required:
+ - volumeGroupSnapshotHandle
+ - volumeSnapshotHandles
+ type: object
+ x-kubernetes-validations:
+ - message: groupSnapshotHandles is immutable
+ rule: self == oldSelf
+ volumeHandles:
+ description: |-
+ VolumeHandles is a list of volume handles on the backend to be snapshotted
+ together. It is specified for dynamic provisioning of the VolumeGroupSnapshot.
+ This field is immutable.
+ items:
+ type: string
+ type: array
+ x-kubernetes-validations:
+ - message: volumeHandles is immutable
+ rule: self == oldSelf
+ type: object
+ x-kubernetes-validations:
+ - message: volumeHandles is required once set
+ rule: '!has(oldSelf.volumeHandles) || has(self.volumeHandles)'
+ - message: groupSnapshotHandles is required once set
+ rule: '!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)'
+ - message: exactly one of volumeHandles and groupSnapshotHandles must
+ be set
+ rule: (has(self.volumeHandles) && !has(self.groupSnapshotHandles))
+ || (!has(self.volumeHandles) && has(self.groupSnapshotHandles))
+ volumeGroupSnapshotClassName:
+ description: |-
+ VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from
+ which this group snapshot was (or will be) created.
+ Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or
+ recreated with a different set of values, and as such, should not be referenced
+ post-snapshot creation.
+ For dynamic provisioning, this field must be set.
+ This field may be unset for pre-provisioned snapshots.
+ type: string
+ x-kubernetes-validations:
+ - message: volumeGroupSnapshotClassName is immutable once set
+ rule: self == oldSelf
+ volumeGroupSnapshotRef:
+ description: |-
+ VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this
+ VolumeGroupSnapshotContent object is bound.
+ VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference
+ this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid.
+ For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the
+ VolumeGroupSnapshot object MUST be provided for binding to happen.
+ This field is immutable after creation.
+ Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: |-
+ If referring to a piece of an object instead of an entire object, this string
+ should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within a pod, this would take on a value like:
+ "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]" (container with
+ index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ referencing a part of an object.
+ TODO: this design is not final and this field is subject to change in the future.
+ type: string
+ kind:
+ description: |-
+ Kind of the referent.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ namespace:
+ description: |-
+ Namespace of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ type: string
+ resourceVersion:
+ description: |-
+ Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ type: string
+ uid:
+ description: |-
+ UID of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
+ must be set
+ rule: has(self.name) && has(self.__namespace__)
+ - message: volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
+ are immutable
+ rule: self.name == oldSelf.name && self.__namespace__ == oldSelf.__namespace__
+ - message: volumeGroupSnapshotRef.uid is immutable once set
+ rule: '!has(oldSelf.uid) || (has(self.uid) && self.uid == oldSelf.uid)'
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeGroupSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a group snapshot.
+ properties:
+ creationTime:
+ description: |-
+ CreationTime is the timestamp when the point-in-time group snapshot is taken
+ by the underlying storage system.
+ If not specified, it indicates the creation time of the group snapshot is unknown.
+ This field is the source for the CreationTime field in VolumeGroupSnapshotStatus.
+ format: date-time
+ type: string
+ error:
+ description: |-
+ Error is the last observed error during group snapshot creation, if any.
+ Upon success after retry, this error field will be cleared.
+ properties:
+ message:
+ description: |-
+ message is a string detailing the error encountered during snapshot
+ creation, if specified.
+ NOTE: message may be logged, and it should not contain sensitive
+ information.
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: |-
+ ReadyToUse indicates if all the individual snapshots in the group are ready to be
+ used to restore a group of volumes.
+ ReadyToUse becomes true when ReadyToUse is true for all individual snapshots.
+ type: boolean
+ volumeGroupSnapshotHandle:
+ description: |-
+ VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
+ to identify the VolumeGroupSnapshot on the storage system.
+ If a storage system does not provide such an id, the
+ CSI driver can choose to return the VolumeGroupSnapshot name.
+ type: string
+ x-kubernetes-validations:
+ - message: volumeGroupSnapshotHandle is immutable once set
+ rule: self == oldSelf
+ volumeSnapshotInfoList:
+ description: |-
+ This field was introduced in v1beta2 and replaces VolumeSnapshotHandlePairList.
+ VolumeSnapshotInfoList is a list of snapshot information returned
+ by the CSI driver to identify snapshots on the storage system.
+ items:
+ description: |-
+ The VolumeSnapshotInfo struct was added in v1beta2.
+ VolumeSnapshotInfo contains information for a single snapshot.
+ properties:
+ creationTime:
+ description: |-
+ creationTime is the timestamp when the point-in-time snapshot is taken
+ by the underlying storage system.
+ format: int64
+ type: integer
+ readyToUse:
+ description: ReadyToUse indicates if the snapshot is ready to
+ be used to restore a volume.
+ type: boolean
+ restoreSize:
+ description: |-
+ RestoreSize represents the minimum size of volume required to create a volume
+ from this snapshot.
+ format: int64
+ type: integer
+ snapshotHandle:
+ description: SnapshotHandle is the CSI "snapshot_id" of this
+ snapshot on the underlying storage system.
+ type: string
+ volumeHandle:
+ description: |-
+ VolumeHandle specifies the CSI "volume_id" of the volume from which this snapshot
+ was taken.
+ type: string
+ type: object
+ type: array
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
storage: true
subresources:
status: {}
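
The v1beta2 `source` stanza enforces via CEL that exactly one of `volumeHandles` and `groupSnapshotHandles` is set, and that the choice can never be swapped or cleared. A sketch of registering a pre-provisioned group snapshot under those rules; the Go struct and field names (`VolumeGroupSnapshotContentSpec`, `GroupSnapshotHandles`, the reuse of the volumesnapshot v1 `DeletionPolicy` constants) are assumptions carried over from the v1beta1 types, not confirmed by this diff:

```go
package main

import (
	"context"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createPreProvisionedContent registers a group snapshot that already exists on
// the storage backend. All struct and field names are assumptions based on the
// v1beta1 Go types; the CEL rules above require exactly one of volumeHandles
// and groupSnapshotHandles, and forbid swapping between them later.
func createPreProvisionedContent(ctx context.Context, client *groupsnapshotv1beta2.GroupsnapshotV1beta2Client) (*v1beta2.VolumeGroupSnapshotContent, error) {
	content := &v1beta2.VolumeGroupSnapshotContent{
		ObjectMeta: metav1.ObjectMeta{Name: "pre-provisioned-content-demo"},
		Spec: v1beta2.VolumeGroupSnapshotContentSpec{
			// Both name and namespace are required by the CEL rule on the ref,
			// and neither may change afterwards.
			VolumeGroupSnapshotRef: corev1.ObjectReference{
				Name:      "pre-provisioned-demo",
				Namespace: "default",
			},
			Driver: "hostpath.csi.k8s.io",
			// Assumes v1beta2 reuses the volumesnapshot v1 DeletionPolicy
			// constants, as v1beta1 does.
			DeletionPolicy: snapshotv1.VolumeSnapshotContentRetain,
			Source: v1beta2.VolumeGroupSnapshotContentSource{
				GroupSnapshotHandles: &v1beta2.GroupSnapshotHandles{
					VolumeGroupSnapshotHandle: "group-handle",
					VolumeSnapshotHandles:     []string{"handle-1", "handle-2"},
				},
			},
		},
	}
	return client.VolumeGroupSnapshotContents().Create(ctx, content, metav1.CreateOptions{})
}
```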
diff --git a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml
index 145d1211d..3f8c4909a 100644
--- a/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml
+++ b/client/config/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1337"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshots.groupsnapshot.storage.k8s.io
spec:
@@ -43,6 +43,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
+ deprecated: true
name: v1beta1
schema:
openAPIV3Schema:
@@ -234,6 +235,226 @@ spec:
- spec
type: object
served: true
+ storage: false
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicates if all the individual snapshots in the group are ready
+ to be used to restore a group of volumes.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot.
+ jsonPath: .spec.volumeGroupSnapshotClassName
+ name: VolumeGroupSnapshotClass
+ type: string
+ - description: Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot
+ object intends to bind. Please note that verification of binding actually
+ requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent
+ to ensure both are pointing at each other. Binding MUST be verified prior
+ to usage of this object.
+ jsonPath: .status.boundVolumeGroupSnapshotContentName
+ name: VolumeGroupSnapshotContent
+ type: string
+ - description: Timestamp when the point-in-time group snapshot was taken by the
+ underlying storage system.
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: |-
+ VolumeGroupSnapshot is a user's request for creating either a point-in-time
+ group snapshot or binding to a pre-existing group snapshot.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Spec defines the desired characteristics of a group snapshot requested by a user.
+ Required.
+ properties:
+ source:
+ description: |-
+ Source specifies where a group snapshot will be created from.
+ This field is immutable after creation.
+ Required.
+ properties:
+ selector:
+ description: |-
+ Selector is a label query over persistent volume claims that are to be
+ grouped together for snapshotting.
+ This labelSelector will be used to match the label added to a PVC.
+ If the label is added to or removed from a volume after a group snapshot
+ is created, the existing group snapshots won't be modified.
+ Once a VolumeGroupSnapshotContent is created and the sidecar starts to process
+ it, the volume list will not change with retries.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ x-kubernetes-validations:
+ - message: selector is immutable
+ rule: self == oldSelf
+ volumeGroupSnapshotContentName:
+ description: |-
+ VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent
+ object representing an existing volume group snapshot.
+ This field should be set if the volume group snapshot already exists and
+ only needs a representation in Kubernetes.
+ This field is immutable.
+ type: string
+ x-kubernetes-validations:
+ - message: volumeGroupSnapshotContentName is immutable
+ rule: self == oldSelf
+ type: object
+ x-kubernetes-validations:
+ - message: selector is required once set
+ rule: '!has(oldSelf.selector) || has(self.selector)'
+ - message: volumeGroupSnapshotContentName is required once set
+ rule: '!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)'
+ - message: exactly one of selector and volumeGroupSnapshotContentName
+ must be set
+ rule: (has(self.selector) && !has(self.volumeGroupSnapshotContentName))
+ || (!has(self.selector) && has(self.volumeGroupSnapshotContentName))
+ volumeGroupSnapshotClassName:
+ description: |-
+ VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass
+ requested by the VolumeGroupSnapshot.
+ VolumeGroupSnapshotClassName may be left nil to indicate that the default
+ class will be used.
+ Empty string is not allowed for this field.
+ type: string
+ x-kubernetes-validations:
+ - message: volumeGroupSnapshotClassName must not be the empty string
+ when set
+ rule: size(self) > 0
+ required:
+ - source
+ type: object
+ status:
+ description: |-
+ Status represents the current information of a group snapshot.
+ Consumers must verify binding between VolumeGroupSnapshot and
+ VolumeGroupSnapshotContent objects is successful (by validating that both
+ VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
+ using this object.
+ properties:
+ boundVolumeGroupSnapshotContentName:
+ description: |-
+ BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
+ object to which this VolumeGroupSnapshot object intends to bind.
+ If not specified, it indicates that the VolumeGroupSnapshot object has not
+ been successfully bound to a VolumeGroupSnapshotContent object yet.
+ NOTE: To avoid possible security issues, consumers must verify binding between
+ VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
+ (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
+ point at each other) before using this object.
+ type: string
+ x-kubernetes-validations:
+ - message: boundVolumeGroupSnapshotContentName is immutable once set
+ rule: self == oldSelf
+ creationTime:
+ description: |-
+ CreationTime is the timestamp when the point-in-time group snapshot is taken
+ by the underlying storage system.
+ If not specified, it may indicate that the creation time of the group snapshot
+ is unknown.
+ This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus.
+ format: date-time
+ type: string
+ error:
+ description: |-
+ Error is the last observed error during group snapshot creation, if any.
+ This field could be helpful to upper-level controllers (e.g., an application
+ controller) to decide whether they should continue waiting for the group
+ snapshot to be created based on the type of error reported.
+ The snapshot controller will keep retrying when an error occurs during the
+ group snapshot creation. Upon success, this error field will be cleared.
+ properties:
+ message:
+ description: |-
+ message is a string detailing the error encountered during snapshot
+ creation, if specified.
+ NOTE: message may be logged, and it should not contain sensitive
+ information.
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: |-
+ ReadyToUse indicates if all the individual snapshots in the group are ready
+ to be used to restore a group of volumes.
+ ReadyToUse becomes true when ReadyToUse is true for all individual snapshots.
+ If not specified, it means the readiness of a group snapshot is unknown.
+ type: boolean
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
storage: true
subresources:
status: {}
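
The VolumeGroupSnapshot `source` carries the mirror-image one-of rule: exactly one of `selector` and `volumeGroupSnapshotContentName`, both immutable once set. A sketch of dynamically provisioning a group snapshot by label selector, assuming the v1beta2 Go types mirror their v1beta1 counterparts; all object names are placeholders:

```go
package main

import (
	"context"

	v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
	groupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumegroupsnapshot/v1beta2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createGroupSnapshot dynamically provisions a group snapshot of every PVC in
// the namespace carrying the "group: demo" label. Field names are assumptions
// based on the v1beta1 Go types.
func createGroupSnapshot(ctx context.Context, client *groupsnapshotv1beta2.GroupsnapshotV1beta2Client, namespace string) (*v1beta2.VolumeGroupSnapshot, error) {
	className := "csi-hostpath-groupsnapclass" // must be non-empty when set
	vgs := &v1beta2.VolumeGroupSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "new-groupsnapshot-demo", Namespace: namespace},
		Spec: v1beta2.VolumeGroupSnapshotSpec{
			// Leave nil to use the default class; the empty string is rejected.
			VolumeGroupSnapshotClassName: &className,
			Source: v1beta2.VolumeGroupSnapshotSource{
				// Exactly one of Selector and VolumeGroupSnapshotContentName
				// may be set, and the choice is immutable (CEL rules above).
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"group": "demo"},
				},
			},
		},
	}
	return client.VolumeGroupSnapshots(namespace).Create(ctx, vgs, metav1.CreateOptions{})
}
```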
diff --git a/client/hack/README.md b/client/hack/README.md
index 1246280ae..7bb0cdc41 100644
--- a/client/hack/README.md
+++ b/client/hack/README.md
@@ -121,18 +121,9 @@ are correctly working.
```
./hack/run-cel-tests.sh
- cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml: SUCCESS
- cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml: SUCCESS
- cel-tests/volumegroupsnapshotcontent/vgsc-source-empty.yaml: SUCCESS (expected failure)
- cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.pre.yaml: SUCCESS
- cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-name.yaml: SUCCESS (expected failure)
- [...]
- cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.pre.yaml -> cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml: SUCCESS (expected failure)
- cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.pre.yaml -> cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.post.yaml: SUCCESS (expected failure)
- cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.pre.yaml -> cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml: SUCCESS (expected failure)
- cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.pre.yaml -> cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.post.yaml: SUCCESS (expected failure)
- [...]
-
- SUCCESS: 90
+ SUCCESS: 108
FAILURES: 0
```
+
+Adding the `-v` option to `run-cel-tests.sh` will show the output of each
+test case.
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-class-empty-string.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-class-empty-string.yaml
index 854634fb3..fce19f287 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-class-empty-string.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-class-empty-string.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.post.yaml
index 722f46700..ce6acb2be 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.pre.yaml
index 01416b5fe..ffb87c730 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.post.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.post.yaml
index a6177b87e..eb3292f0b 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.pre.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.pre.yaml
index 01416b5fe..ffb87c730 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-content-to-selector.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-no-class.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-no-class.yaml
index ee4213770..188ca8256 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-no-class.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-no-class.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.post.yaml
index 9f92303a2..8f8349c20 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.pre.yaml
index a6177b87e..eb3292f0b 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.post.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.post.yaml
index 01416b5fe..ffb87c730 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.pre.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.pre.yaml
index a6177b87e..eb3292f0b 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-selector-to-content.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-content.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-content.yaml
index 01416b5fe..ffb87c730 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-content.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-content.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-no-selector-no-content.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-no-selector-no-content.yaml
index c10d3ca3d..dccbe016e 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-no-selector-no-content.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-no-selector-no-content.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector-and-content.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector-and-content.yaml
index ffee8cd4f..ce2cb0da5 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector-and-content.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector-and-content.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector.yaml b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector.yaml
index a6177b87e..eb3292f0b 100644
--- a/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshot/vgs-with-selector.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshot
metadata:
name: new-groupsnapshot-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml
new file mode 100644
index 000000000..affadf845
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotClass
+metadata:
+ name: csi-hostpath-groupsnapclass
+driver: hostpath.csi.k8s.io
+deletionPolicy: Retain
+parameters:
+ one: two
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..303b5d51d
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+deletionPolicy: Invalid value: "string": deletionPolicy is immutable once set
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.pre.yaml
new file mode 100644
index 000000000..b488d16d1
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-deletionpolicy-immutable.pre.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotClass
+metadata:
+ name: csi-hostpath-groupsnapclass
+driver: hostpath.csi.k8s.io
+deletionPolicy: Delete
+parameters:
+ one: two
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml
new file mode 100644
index 000000000..685964c31
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotClass
+metadata:
+ name: csi-hostpath-groupsnapclass
+driver: hostpath.csi.k8s.io
+deletionPolicy: Delete
+parameters:
+ one: three
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..7b264a46f
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+parameters: Invalid value: "object": parameters are immutable once set
diff --git a/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.pre.yaml
new file mode 100644
index 000000000..faf744dec
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotclass/vgs-class-parameters-immutable.pre.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotClass
+metadata:
+ name: csi-hostpath-groupsnapclass
+driver: hostpath.csi.k8s.io
+deletionPolicy: Delete
+parameters:
+ one: two
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml.tx_err
deleted file mode 100644
index fc10adb44..000000000
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml.tx_err
+++ /dev/null
@@ -1 +0,0 @@
-volumeGroupSnapshotRef is immutable
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml
deleted file mode 100644
index 602fb5ea2..000000000
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
-kind: VolumeGroupSnapshotContent
-metadata:
- name: new-groupsnapshotcontent-demo
-spec:
- volumeGroupSnapshotRef:
- name: new-groupsnapshot-demo
- namespace: default-changed
- driver: hostpath.csi.k8s.io
- source:
- volumeHandles:
- - handles
- deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml.tx_err
deleted file mode 100644
index fc10adb44..000000000
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.post.yaml.tx_err
+++ /dev/null
@@ -1 +0,0 @@
-volumeGroupSnapshotRef is immutable
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml
new file mode 100644
index 000000000..769efd060
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..8d57558c6
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+Invalid value: "string": volumeGroupSnapshotClassName is immutable once set
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.pre.yaml
similarity index 50%
rename from client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.pre.yaml
rename to client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.pre.yaml
index ee1ff2d91..58f70c59b 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-class-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
@@ -8,7 +8,11 @@ spec:
name: new-groupsnapshot-demo
namespace: default
driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-pre
source:
- volumeHandles:
- - handles
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
deletionPolicy: Retain
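
This renamed pair now targets `volumeGroupSnapshotClassName` on v1beta2 content objects: the class name changes from `class-pre` to `class-post` across the update, and the `.tx_err` fixture expects the `volumeGroupSnapshotClassName is immutable once set` message. The same transition-rule mechanics sketched after the class-parameters fixtures apply; since the field is optional, only changes to an already-set value are rejected.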
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml
new file mode 100644
index 000000000..f9fbedb19
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpathafter.csi.k8s.io
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..7fa7d5609
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+spec.driver: Invalid value: "string": driver is immutable once set
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.pre.yaml
similarity index 56%
rename from client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.pre.yaml
rename to client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.pre.yaml
index ee1ff2d91..6aa9ce13f 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-namespace.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-driver-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
@@ -9,6 +9,9 @@ spec:
namespace: default
driver: hostpath.csi.k8s.io
source:
- volumeHandles:
- - handles
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.post.yaml
new file mode 100644
index 000000000..a7563cdd3
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.post.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Delete
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.pre.yaml
new file mode 100644
index 000000000..769efd060
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-mutate-deletionpolicy.pre.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
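
Note that this pair ships without a `.tx_err` fixture: per the harness logic in `run-cel-tests.sh` further down, an update with no expected-error file must succeed. The pair therefore documents that `deletionPolicy`, unlike the fields above, remains mutable on v1beta2 content objects.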
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ok.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ok.yaml
index ee1ff2d91..1e0663454 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ok.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ok.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml
similarity index 51%
rename from client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml
rename to client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml
index 3afd1f51b..7e414862e 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-change-ref-name.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
@@ -8,7 +8,11 @@ spec:
name: new-groupsnapshot-demo-changed
namespace: default
driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
source:
- volumeHandles:
- - handles
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..412ded48c
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+spec.volumeGroupSnapshotRef: Invalid value: "object": volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace are immutable
\ No newline at end of file
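
The combined message, reported at `spec.volumeGroupSnapshotRef` with an `"object"` correlation, suggests a single rule placed on the ref object rather than the per-field rules whose `vgsc-change-ref-*` fixtures are deleted earlier in this diff. A hedged sketch of such an object-level marker (the exact rule text is an assumption for illustration, not taken from the PR):

```go
package v1beta2

import corev1 "k8s.io/api/core/v1"

// Sketch only. An object-level XValidation on the ref field yields the
// `Invalid value: "object"` correlation seen in the expected error and
// validates name and namespace together, which is why changing either
// field in the post fixtures trips the same message.
type volumeGroupSnapshotContentSpecSketch struct {
	// +kubebuilder:validation:XValidation:rule="self.name == oldSelf.name && self.namespace == oldSelf.namespace",message="volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace are immutable"
	VolumeGroupSnapshotRef corev1.ObjectReference `json:"volumeGroupSnapshotRef"`
}
```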
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.pre.yaml
new file mode 100644
index 000000000..769efd060
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-name-immutable.pre.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml
new file mode 100644
index 000000000..7e414862e
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo-changed
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..412ded48c
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+spec.volumeGroupSnapshotRef: Invalid value: "object": volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace are immutable
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.pre.yaml
new file mode 100644
index 000000000..769efd060
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-namespace-immutable.pre.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-name.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-name.yaml
index 77fb4a0c1..fe87307b1 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-name.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-name.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-namespace.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-namespace.yaml
index 4b8429665..76bc3bb44 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-namespace.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-only-namespace.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml
new file mode 100644
index 000000000..4e4060f04
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo-changed
+ namespace: default
+ uid: f7eebb71-a58e-4196-82a1-ebd854ae2bb0
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml.tx_err b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml.tx_err
new file mode 100644
index 000000000..feb6e7f67
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.post.yaml.tx_err
@@ -0,0 +1 @@
+Invalid value: "object": volumeGroupSnapshotRef.uid is immutable once set
\ No newline at end of file
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.pre.yaml
new file mode 100644
index 000000000..ec609e970
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-immutable.pre.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo-changed
+ namespace: default
+ uid: f7eebb71-a58e-4196-82a1-ebd854ae2bb9
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.post.yaml
new file mode 100644
index 000000000..ec609e970
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.post.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo-changed
+ namespace: default
+ uid: f7eebb71-a58e-4196-82a1-ebd854ae2bb9
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.pre.yaml
new file mode 100644
index 000000000..7e414862e
--- /dev/null
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-ref-uid-set-from-empty.pre.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshotcontent-demo
+spec:
+ volumeGroupSnapshotRef:
+ name: new-groupsnapshot-demo-changed
+ namespace: default
+ driver: hostpath.csi.k8s.io
+ volumeGroupSnapshotClassName: class-post
+ source:
+ groupSnapshotHandles:
+ volumeGroupSnapshotHandle: this-handle
+ volumeSnapshotHandles:
+ - handle
+ - another-handle
+ deletionPolicy: Retain
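
Taken together, the two uid pairs pin down "immutable once set" precisely: `vgsc-ref-uid-immutable` changes an existing uid (`…bb9` to `…bb0`) and must fail, while `vgsc-ref-uid-set-from-empty` adds a uid where none existed and must succeed (it ships no `.tx_err`). A rule shaped roughly like the following would produce that behavior; this is an assumption for illustration, not the rule from the PR:

```go
package v1beta2

// Sketch: placed on the volumeGroupSnapshotRef object, hence the "object"
// correlation in the expected error. The has() guard is what lets the
// set-from-empty transition pass while any change to an existing uid fails:
//
//	!has(oldSelf.uid) || (has(self.uid) && self.uid == oldSelf.uid)
```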
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-both-volume-and-groupsnapshot.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-both-volume-and-groupsnapshot.yaml
index 28dae401a..a3215e244 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-both-volume-and-groupsnapshot.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-both-volume-and-groupsnapshot.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-empty.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-empty.yaml
index 4c62db7dc..e54f6c1cc 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-empty.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-empty.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.post.yaml
index 52dd8ba61..9814e9933 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.pre.yaml
index 10ff343b3..6aa9ce13f 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.post.yaml
index ee1ff2d91..1e0663454 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.pre.yaml
index 10ff343b3..6aa9ce13f 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-groupsnapshot-to-volume.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.post.yaml
index 6d8630662..78066f749 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.pre.yaml
index ee1ff2d91..1e0663454 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-immutable.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml
index 10ff343b3..6aa9ce13f 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.post.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.pre.yaml b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.pre.yaml
index ee1ff2d91..1e0663454 100644
--- a/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.pre.yaml
+++ b/client/hack/cel-tests/volumegroupsnapshotcontent/vgsc-source-volume-to-groupsnapshot.pre.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
kind: VolumeGroupSnapshotContent
metadata:
name: new-groupsnapshotcontent-demo
diff --git a/client/hack/run-cel-tests.sh b/client/hack/run-cel-tests.sh
index 1cf07065f..66600ebb8 100755
--- a/client/hack/run-cel-tests.sh
+++ b/client/hack/run-cel-tests.sh
@@ -7,6 +7,20 @@ successes=0
cd "$(dirname "$0")"
+VERBOSE=""
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -v|--verbose)
+ VERBOSE="1"
+ shift
+ ;;
+ *)
+ echo "Unexpected argument '$1'"
+ exit 1
+ ;;
+ esac
+done
+
exec_case() {
local test_case="$1"
local test_case_err="$1.err"
@@ -26,7 +40,7 @@ exec_case() {
err=$(cat "$test_case_err")
if grep "$err" "$test_case_out" > /dev/null 2>&1; then
((++successes))
- echo "$test_case: SUCCESS (expected failure)"
+ if [ ! -z "$VERBOSE" ]; then echo "$test_case: SUCCESS (expected failure)"; fi
else
echo "$test_case: FAIL (unexpected msg): $output"
((++failures))
@@ -35,7 +49,7 @@ exec_case() {
echo "$test_case: FAIL (unexpected success): $output"
((++failures))
elif [ $ret == 0 ] && [ ! -f "$test_case_err" ]; then
- echo "$test_case: SUCCESS"
+ if [ ! -z "$VERBOSE" ]; then echo "$test_case: SUCCESS"; fi
((++successes))
fi
}
@@ -66,7 +80,7 @@ exec_tx_case() {
local err
err=$(cat "$test_case_err")
if grep "$err" "$test_case_out" > /dev/null 2>&1; then
- echo "$test_header SUCCESS (expected failure)"
+ if [ ! -z "$VERBOSE" ]; then echo "$test_header SUCCESS (expected failure)"; fi
((++successes))
else
echo "$test_header FAIL (unexpected msg): $output"
@@ -76,7 +90,7 @@ exec_tx_case() {
echo "$test_header FAIL (unexpected success): $output"
((++failures))
elif [ $ret == 0 ] && [ ! -f "$test_case_err" ]; then
- echo "$test_header SUCCESS"
+ if [ ! -z "$VERBOSE" ]; then echo "$test_header SUCCESS"; fi
((++successes))
fi
}
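
The runner now accepts `-v`/`--verbose`: without it, only failures are echoed, which keeps CI logs short as the number of transition cases grows, while successes are still counted. Judging by the fixture naming and the visible expected-error handling, each transition case pairs a `*.pre.yaml` with a `*.post.yaml` applied as an update; when a matching `*.post.yaml.tx_err` exists its content must appear in the apiserver output, and when it is absent the update must succeed. Run `./run-cel-tests.sh -v` to restore the previous per-case success output.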
diff --git a/client/informers/externalversions/factory.go b/client/informers/externalversions/factory.go
index b2b0ef72f..d13af4687 100644
--- a/client/informers/externalversions/factory.go
+++ b/client/informers/externalversions/factory.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/client/informers/externalversions/generic.go b/client/informers/externalversions/generic.go
index 3f4fb8ee5..2fb01475c 100644
--- a/client/informers/externalversions/generic.go
+++ b/client/informers/externalversions/generic.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@ import (
"fmt"
v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
@@ -61,6 +62,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case v1beta1.SchemeGroupVersion.WithResource("volumegroupsnapshotcontents"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Groupsnapshot().V1beta1().VolumeGroupSnapshotContents().Informer()}, nil
+ // Group=groupsnapshot.storage.k8s.io, Version=v1beta2
+ case v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshots"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Groupsnapshot().V1beta2().VolumeGroupSnapshots().Informer()}, nil
+ case v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshotclasses"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Groupsnapshot().V1beta2().VolumeGroupSnapshotClasses().Informer()}, nil
+ case v1beta2.SchemeGroupVersion.WithResource("volumegroupsnapshotcontents"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Groupsnapshot().V1beta2().VolumeGroupSnapshotContents().Informer()}, nil
+
// Group=snapshot.storage.k8s.io, Version=v1
case v1.SchemeGroupVersion.WithResource("volumesnapshots"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Snapshot().V1().VolumeSnapshots().Informer()}, nil
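
With the v1beta2 cases registered, dynamic consumers can resolve the new group-snapshot informers by GroupVersionResource. A small usage sketch (factory construction elided; function and variable names are illustrative):

```go
package main

import (
	externalversions "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Sketch: resolving the new v1beta2 group-snapshot informer generically.
// The GVR below matches the cases added to ForResource above.
func genericGroupSnapshotInformer(factory externalversions.SharedInformerFactory) error {
	gvr := schema.GroupVersionResource{
		Group:    "groupsnapshot.storage.k8s.io",
		Version:  "v1beta2",
		Resource: "volumegroupsnapshots",
	}
	gi, err := factory.ForResource(gvr)
	if err != nil {
		return err // unknown GroupVersionResource
	}
	_ = gi.Informer() // a cache.SharedIndexInformer for VolumeGroupSnapshot
	return nil
}
```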
diff --git a/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/client/informers/externalversions/internalinterfaces/factory_interfaces.go
index 50c56e95e..43f74471b 100644
--- a/client/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/client/informers/externalversions/volumegroupsnapshot/interface.go b/client/informers/externalversions/volumegroupsnapshot/interface.go
index e75bc2edf..fdd82f6cf 100644
--- a/client/informers/externalversions/volumegroupsnapshot/interface.go
+++ b/client/informers/externalversions/volumegroupsnapshot/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,12 +21,15 @@ package volumegroupsnapshot
import (
internalinterfaces "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/internalinterfaces"
v1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta1"
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta2"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1beta1 provides access to shared informers for resources in V1beta1.
V1beta1() v1beta1.Interface
+ // V1beta2 provides access to shared informers for resources in V1beta2.
+ V1beta2() v1beta2.Interface
}
type group struct {
@@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}
+
+// V1beta2 returns a new v1beta2.Interface.
+func (g *group) V1beta2() v1beta2.Interface {
+ return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
+}
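
Typed access mirrors the existing v1beta1 wiring one version up. A minimal sketch, assuming a pre-built versioned clientset:

```go
package main

import (
	"time"

	versioned "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
	externalversions "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions"
)

// Sketch: wiring the new v1beta2 group-snapshot informer and lister.
func watchGroupSnapshots(cs versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(cs, 10*time.Minute)
	groupSnapshots := factory.Groupsnapshot().V1beta2().VolumeGroupSnapshots()

	informer := groupSnapshots.Informer() // register event handlers here
	lister := groupSnapshots.Lister()     // cache-backed, read-only access
	_, _ = informer, lister               // placeholders for real wiring

	factory.Start(stopCh) // starts every informer requested above
}
```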
diff --git a/client/informers/externalversions/volumegroupsnapshot/v1beta2/interface.go b/client/informers/externalversions/volumegroupsnapshot/v1beta2/interface.go
new file mode 100644
index 000000000..67419a8e3
--- /dev/null
+++ b/client/informers/externalversions/volumegroupsnapshot/v1beta2/interface.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ internalinterfaces "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // VolumeGroupSnapshots returns a VolumeGroupSnapshotInformer.
+ VolumeGroupSnapshots() VolumeGroupSnapshotInformer
+ // VolumeGroupSnapshotClasses returns a VolumeGroupSnapshotClassInformer.
+ VolumeGroupSnapshotClasses() VolumeGroupSnapshotClassInformer
+ // VolumeGroupSnapshotContents returns a VolumeGroupSnapshotContentInformer.
+ VolumeGroupSnapshotContents() VolumeGroupSnapshotContentInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// VolumeGroupSnapshots returns a VolumeGroupSnapshotInformer.
+func (v *version) VolumeGroupSnapshots() VolumeGroupSnapshotInformer {
+ return &volumeGroupSnapshotInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// VolumeGroupSnapshotClasses returns a VolumeGroupSnapshotClassInformer.
+func (v *version) VolumeGroupSnapshotClasses() VolumeGroupSnapshotClassInformer {
+ return &volumeGroupSnapshotClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// VolumeGroupSnapshotContents returns a VolumeGroupSnapshotContentInformer.
+func (v *version) VolumeGroupSnapshotContents() VolumeGroupSnapshotContentInformer {
+ return &volumeGroupSnapshotContentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
diff --git a/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
new file mode 100644
index 000000000..d268d6f30
--- /dev/null
+++ b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ time "time"
+
+ volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ versioned "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
+ internalinterfaces "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/internalinterfaces"
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotInformer provides access to a shared informer and lister for
+// VolumeGroupSnapshots.
+type VolumeGroupSnapshotInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta2.VolumeGroupSnapshotLister
+}
+
+type volumeGroupSnapshotInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewVolumeGroupSnapshotInformer constructs a new informer for VolumeGroupSnapshot type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewVolumeGroupSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredVolumeGroupSnapshotInformer constructs a new informer for VolumeGroupSnapshot type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredVolumeGroupSnapshotInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshots(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshots(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &volumegroupsnapshotv1beta2.VolumeGroupSnapshot{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *volumeGroupSnapshotInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *volumeGroupSnapshotInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&volumegroupsnapshotv1beta2.VolumeGroupSnapshot{}, f.defaultInformer)
+}
+
+func (f *volumeGroupSnapshotInformer) Lister() v1beta2.VolumeGroupSnapshotLister {
+ return v1beta2.NewVolumeGroupSnapshotLister(f.Informer().GetIndexer())
+}
diff --git a/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
new file mode 100644
index 000000000..b5cf8d6b5
--- /dev/null
+++ b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ time "time"
+
+ volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ versioned "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
+ internalinterfaces "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/internalinterfaces"
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotClassInformer provides access to a shared informer and lister for
+// VolumeGroupSnapshotClasses.
+type VolumeGroupSnapshotClassInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta2.VolumeGroupSnapshotClassLister
+}
+
+type volumeGroupSnapshotClassInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewVolumeGroupSnapshotClassInformer constructs a new informer for VolumeGroupSnapshotClass type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewVolumeGroupSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotClassInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredVolumeGroupSnapshotClassInformer constructs a new informer for VolumeGroupSnapshotClass type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredVolumeGroupSnapshotClassInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshotClasses().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshotClasses().Watch(context.TODO(), options)
+ },
+ },
+ &volumegroupsnapshotv1beta2.VolumeGroupSnapshotClass{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *volumeGroupSnapshotClassInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *volumeGroupSnapshotClassInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&volumegroupsnapshotv1beta2.VolumeGroupSnapshotClass{}, f.defaultInformer)
+}
+
+func (f *volumeGroupSnapshotClassInformer) Lister() v1beta2.VolumeGroupSnapshotClassLister {
+ return v1beta2.NewVolumeGroupSnapshotClassLister(f.Informer().GetIndexer())
+}
diff --git a/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
new file mode 100644
index 000000000..bde0a3036
--- /dev/null
+++ b/client/informers/externalversions/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ time "time"
+
+ volumegroupsnapshotv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ versioned "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
+ internalinterfaces "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/internalinterfaces"
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotContentInformer provides access to a shared informer and lister for
+// VolumeGroupSnapshotContents.
+type VolumeGroupSnapshotContentInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta2.VolumeGroupSnapshotContentLister
+}
+
+type volumeGroupSnapshotContentInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewVolumeGroupSnapshotContentInformer constructs a new informer for VolumeGroupSnapshotContent type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewVolumeGroupSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotContentInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredVolumeGroupSnapshotContentInformer constructs a new informer for VolumeGroupSnapshotContent type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredVolumeGroupSnapshotContentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Watch(context.TODO(), options)
+ },
+ },
+ &volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *volumeGroupSnapshotContentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredVolumeGroupSnapshotContentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *volumeGroupSnapshotContentInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&volumegroupsnapshotv1beta2.VolumeGroupSnapshotContent{}, f.defaultInformer)
+}
+
+func (f *volumeGroupSnapshotContentInformer) Lister() v1beta2.VolumeGroupSnapshotContentLister {
+ return v1beta2.NewVolumeGroupSnapshotContentLister(f.Informer().GetIndexer())
+}
diff --git a/client/listers/volumegroupsnapshot/v1beta2/expansion_generated.go b/client/listers/volumegroupsnapshot/v1beta2/expansion_generated.go
new file mode 100644
index 000000000..9a5a89ec2
--- /dev/null
+++ b/client/listers/volumegroupsnapshot/v1beta2/expansion_generated.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+// VolumeGroupSnapshotListerExpansion allows custom methods to be added to
+// VolumeGroupSnapshotLister.
+type VolumeGroupSnapshotListerExpansion interface{}
+
+// VolumeGroupSnapshotNamespaceListerExpansion allows custom methods to be added to
+// VolumeGroupSnapshotNamespaceLister.
+type VolumeGroupSnapshotNamespaceListerExpansion interface{}
+
+// VolumeGroupSnapshotClassListerExpansion allows custom methods to be added to
+// VolumeGroupSnapshotClassLister.
+type VolumeGroupSnapshotClassListerExpansion interface{}
+
+// VolumeGroupSnapshotContentListerExpansion allows custom methods to be added to
+// VolumeGroupSnapshotContentLister.
+type VolumeGroupSnapshotContentListerExpansion interface{}
diff --git a/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
new file mode 100644
index 000000000..cc58c9aa9
--- /dev/null
+++ b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshot.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotLister helps list VolumeGroupSnapshots.
+// All objects returned here must be treated as read-only.
+type VolumeGroupSnapshotLister interface {
+ // List lists all VolumeGroupSnapshots in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshot, err error)
+ // VolumeGroupSnapshots returns an object that can list and get VolumeGroupSnapshots.
+ VolumeGroupSnapshots(namespace string) VolumeGroupSnapshotNamespaceLister
+ VolumeGroupSnapshotListerExpansion
+}
+
+// volumeGroupSnapshotLister implements the VolumeGroupSnapshotLister interface.
+type volumeGroupSnapshotLister struct {
+ indexer cache.Indexer
+}
+
+// NewVolumeGroupSnapshotLister returns a new VolumeGroupSnapshotLister.
+func NewVolumeGroupSnapshotLister(indexer cache.Indexer) VolumeGroupSnapshotLister {
+ return &volumeGroupSnapshotLister{indexer: indexer}
+}
+
+// List lists all VolumeGroupSnapshots in the indexer.
+func (s *volumeGroupSnapshotLister) List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshot, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.VolumeGroupSnapshot))
+ })
+ return ret, err
+}
+
+// VolumeGroupSnapshots returns an object that can list and get VolumeGroupSnapshots.
+func (s *volumeGroupSnapshotLister) VolumeGroupSnapshots(namespace string) VolumeGroupSnapshotNamespaceLister {
+ return volumeGroupSnapshotNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// VolumeGroupSnapshotNamespaceLister helps list and get VolumeGroupSnapshots.
+// All objects returned here must be treated as read-only.
+type VolumeGroupSnapshotNamespaceLister interface {
+ // List lists all VolumeGroupSnapshots in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshot, err error)
+ // Get retrieves the VolumeGroupSnapshot from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta2.VolumeGroupSnapshot, error)
+ VolumeGroupSnapshotNamespaceListerExpansion
+}
+
+// volumeGroupSnapshotNamespaceLister implements the VolumeGroupSnapshotNamespaceLister
+// interface.
+type volumeGroupSnapshotNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all VolumeGroupSnapshots in the indexer for a given namespace.
+func (s volumeGroupSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshot, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.VolumeGroupSnapshot))
+ })
+ return ret, err
+}
+
+// Get retrieves the VolumeGroupSnapshot from the indexer for a given namespace and name.
+func (s volumeGroupSnapshotNamespaceLister) Get(name string) (*v1beta2.VolumeGroupSnapshot, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta2.Resource("volumegroupsnapshot"), name)
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshot), nil
+}
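
As the generated comments note, lister reads come straight from the informer cache and must be treated as read-only. A short usage sketch (names are illustrative):

```go
package main

import (
	v1beta2listers "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
)

// Sketch: namespace-scoped Get against the shared cache.
func getGroupSnapshot(l v1beta2listers.VolumeGroupSnapshotLister) error {
	vgs, err := l.VolumeGroupSnapshots("default").Get("new-groupsnapshot-demo")
	if err != nil {
		return err // a NotFound error when absent from the cache
	}
	_ = vgs.DeepCopy() // deep-copy before mutating the cached object
	return nil
}
```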
diff --git a/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
new file mode 100644
index 000000000..9beb4b35c
--- /dev/null
+++ b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotclass.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotClassLister helps list VolumeGroupSnapshotClasses.
+// All objects returned here must be treated as read-only.
+type VolumeGroupSnapshotClassLister interface {
+ // List lists all VolumeGroupSnapshotClasses in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshotClass, err error)
+ // Get retrieves the VolumeGroupSnapshotClass from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta2.VolumeGroupSnapshotClass, error)
+ VolumeGroupSnapshotClassListerExpansion
+}
+
+// volumeGroupSnapshotClassLister implements the VolumeGroupSnapshotClassLister interface.
+type volumeGroupSnapshotClassLister struct {
+ indexer cache.Indexer
+}
+
+// NewVolumeGroupSnapshotClassLister returns a new VolumeGroupSnapshotClassLister.
+func NewVolumeGroupSnapshotClassLister(indexer cache.Indexer) VolumeGroupSnapshotClassLister {
+ return &volumeGroupSnapshotClassLister{indexer: indexer}
+}
+
+// List lists all VolumeGroupSnapshotClasses in the indexer.
+func (s *volumeGroupSnapshotClassLister) List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshotClass, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.VolumeGroupSnapshotClass))
+ })
+ return ret, err
+}
+
+// Get retrieves the VolumeGroupSnapshotClass from the index for a given name.
+func (s *volumeGroupSnapshotClassLister) Get(name string) (*v1beta2.VolumeGroupSnapshotClass, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta2.Resource("volumegroupsnapshotclass"), name)
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotClass), nil
+}
diff --git a/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
new file mode 100644
index 000000000..b84723390
--- /dev/null
+++ b/client/listers/volumegroupsnapshot/v1beta2/volumegroupsnapshotcontent.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// VolumeGroupSnapshotContentLister helps list VolumeGroupSnapshotContents.
+// All objects returned here must be treated as read-only.
+type VolumeGroupSnapshotContentLister interface {
+ // List lists all VolumeGroupSnapshotContents in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshotContent, err error)
+ // Get retrieves the VolumeGroupSnapshotContent from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta2.VolumeGroupSnapshotContent, error)
+ VolumeGroupSnapshotContentListerExpansion
+}
+
+// volumeGroupSnapshotContentLister implements the VolumeGroupSnapshotContentLister interface.
+type volumeGroupSnapshotContentLister struct {
+ indexer cache.Indexer
+}
+
+// NewVolumeGroupSnapshotContentLister returns a new VolumeGroupSnapshotContentLister.
+func NewVolumeGroupSnapshotContentLister(indexer cache.Indexer) VolumeGroupSnapshotContentLister {
+ return &volumeGroupSnapshotContentLister{indexer: indexer}
+}
+
+// List lists all VolumeGroupSnapshotContents in the indexer.
+func (s *volumeGroupSnapshotContentLister) List(selector labels.Selector) (ret []*v1beta2.VolumeGroupSnapshotContent, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.VolumeGroupSnapshotContent))
+ })
+ return ret, err
+}
+
+// Get retrieves the VolumeGroupSnapshotContent from the index for a given name.
+func (s *volumeGroupSnapshotContentLister) Get(name string) (*v1beta2.VolumeGroupSnapshotContent, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta2.Resource("volumegroupsnapshotcontent"), name)
+ }
+ return obj.(*v1beta2.VolumeGroupSnapshotContent), nil
+}
diff --git a/cmd/csi-snapshotter/main.go b/cmd/csi-snapshotter/main.go
index e135cac88..78e9c8204 100644
--- a/cmd/csi-snapshotter/main.go
+++ b/cmd/csi-snapshotter/main.go
@@ -24,6 +24,7 @@ import (
"os"
"os/signal"
"strings"
+ "sync"
"time"
"google.golang.org/grpc"
@@ -31,6 +32,7 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
+ server "k8s.io/apiserver/pkg/server"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
@@ -296,24 +298,60 @@ func main() {
*extraCreateMetadata,
workqueue.NewTypedItemExponentialFailureRateLimiter[string](*retryIntervalStart, *retryIntervalMax),
utilfeature.DefaultFeatureGate.Enabled(features.VolumeGroupSnapshot),
- snapshotContentfactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotContents(),
- snapshotContentfactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotClasses(),
+ snapshotContentfactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotContents(),
+ snapshotContentfactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotClasses(),
workqueue.NewTypedItemExponentialFailureRateLimiter[string](*retryIntervalStart, *retryIntervalMax),
)
- run := func(context.Context) {
- // run...
- stopCh := make(chan struct{})
- snapshotContentfactory.Start(stopCh)
- factory.Start(stopCh)
- coreFactory.Start(stopCh)
- go ctrl.Run(*threads, stopCh)
-
- // ...until SIGINT
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
- <-c
- close(stopCh)
+ // handle SIGTERM and SIGINT by cancelling the context.
+ var (
+ terminate func() // called when all controllers are finished
+ controllerCtx context.Context // cancelled on a signal to stop all controllers
+ shutdownHandler <-chan struct{} // closed when a SIGTERM or SIGINT is received
+ )
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ // ctx is cancelled once all controllers have finished, shutting down the whole process, incl. leader election
+ ctx, terminate = context.WithCancel(ctx)
+ var cancelControllerCtx context.CancelFunc
+ controllerCtx, cancelControllerCtx = context.WithCancel(ctx)
+ shutdownHandler = server.SetupSignalHandler()
+
+ defer terminate()
+
+ go func() {
+ defer cancelControllerCtx()
+ <-shutdownHandler
+ klog.Info("Received SIGTERM or SIGINT signal, shutting down controller.")
+ }()
+ }
+
+ run := func(ctx context.Context) {
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ // run...
+ stopCh := controllerCtx.Done()
+ snapshotContentfactory.Start(stopCh)
+ factory.Start(stopCh)
+ coreFactory.Start(stopCh)
+ var controllerWg sync.WaitGroup
+ go ctrl.Run(*threads, stopCh, &controllerWg)
+ <-shutdownHandler
+ controllerWg.Wait()
+ terminate()
+ } else {
+ // run...
+ stopCh := make(chan struct{})
+ snapshotContentfactory.Start(stopCh)
+ factory.Start(stopCh)
+ coreFactory.Start(stopCh)
+ go ctrl.Run(*threads, stopCh, nil)
+
+ // ...until SIGINT
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ <-c
+ close(stopCh)
+ }
}
if !*leaderElection {
@@ -338,6 +376,10 @@ func main() {
le.WithLeaseDuration(*leaderElectionLeaseDuration)
le.WithRenewDeadline(*leaderElectionRenewDeadline)
le.WithRetryPeriod(*leaderElectionRetryPeriod)
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ le.WithReleaseOnCancel(true)
+ le.WithContext(ctx)
+ }
if err := le.Run(); err != nil {
klog.Fatalf("failed to initialize leader election: %v", err)
diff --git a/cmd/snapshot-controller/main.go b/cmd/snapshot-controller/main.go
index 7404ffca9..ce7fc38fd 100644
--- a/cmd/snapshot-controller/main.go
+++ b/cmd/snapshot-controller/main.go
@@ -39,6 +39,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
+ server "k8s.io/apiserver/pkg/server"
klog "k8s.io/klog/v2"
@@ -115,20 +116,20 @@ func ensureCustomResourceDefinitionsExist(client *clientset.Clientset, enableVol
return false, nil
}
if enableVolumeGroupSnapshots {
- _, err = client.GroupsnapshotV1beta1().VolumeGroupSnapshots("").List(ctx, listOptions)
+ _, err = client.GroupsnapshotV1beta2().VolumeGroupSnapshots("").List(ctx, listOptions)
if err != nil {
- klog.Errorf("Failed to list v1beta1 volumegroupsnapshots with error=%+v", err)
+ klog.Errorf("Failed to list v1beta2 volumegroupsnapshots with error=%+v", err)
return false, nil
}
- _, err = client.GroupsnapshotV1beta1().VolumeGroupSnapshotClasses().List(ctx, listOptions)
+ _, err = client.GroupsnapshotV1beta2().VolumeGroupSnapshotClasses().List(ctx, listOptions)
if err != nil {
- klog.Errorf("Failed to list v1beta1 volumegroupsnapshotclasses with error=%+v", err)
+ klog.Errorf("Failed to list v1beta2 volumegroupsnapshotclasses with error=%+v", err)
return false, nil
}
- _, err = client.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().List(ctx, listOptions)
+ _, err = client.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().List(ctx, listOptions)
if err != nil {
- klog.Errorf("Failed to list v1beta1 volumegroupsnapshotcontents with error=%+v", err)
+ klog.Errorf("Failed to list v1beta2 volumegroupsnapshotcontents with error=%+v", err)
return false, nil
}
}
@@ -237,9 +238,9 @@ func main() {
factory.Snapshot().V1().VolumeSnapshots(),
factory.Snapshot().V1().VolumeSnapshotContents(),
factory.Snapshot().V1().VolumeSnapshotClasses(),
- factory.Groupsnapshot().V1beta1().VolumeGroupSnapshots(),
- factory.Groupsnapshot().V1beta1().VolumeGroupSnapshotContents(),
- factory.Groupsnapshot().V1beta1().VolumeGroupSnapshotClasses(),
+ factory.Groupsnapshot().V1beta2().VolumeGroupSnapshots(),
+ factory.Groupsnapshot().V1beta2().VolumeGroupSnapshotContents(),
+ factory.Groupsnapshot().V1beta2().VolumeGroupSnapshotClasses(),
coreFactory.Core().V1().PersistentVolumeClaims(),
coreFactory.Core().V1().PersistentVolumes(),
nodeInformer,
@@ -259,18 +260,55 @@ func main() {
os.Exit(1)
}
- run := func(context.Context) {
- // run...
- stopCh := make(chan struct{})
- factory.Start(stopCh)
- coreFactory.Start(stopCh)
- go ctrl.Run(*threads, stopCh)
-
- // ...until SIGINT
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
- <-c
- close(stopCh)
+ ctx := context.Background()
+
+ // handle SIGTERM and SIGINT by cancelling the context.
+ var (
+ terminate func() // called when all controllers are finished
+ controllerCtx context.Context // cancelled on a signal to stop all controllers
+ shutdownHandler <-chan struct{} // closed when a SIGTERM or SIGINT is received
+ )
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ // ctx is cancelled once all controllers have finished, shutting down the whole process, incl. leader election
+ ctx, terminate = context.WithCancel(ctx)
+ var cancelControllerCtx context.CancelFunc
+ controllerCtx, cancelControllerCtx = context.WithCancel(ctx)
+ shutdownHandler = server.SetupSignalHandler()
+
+ defer terminate()
+
+ go func() {
+ defer cancelControllerCtx()
+ <-shutdownHandler
+ klog.Info("Received SIGTERM or SIGINT signal, shutting down controller.")
+ }()
+ }
+
+ run := func(ctx context.Context) {
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ // run...
+ stopCh := controllerCtx.Done()
+ factory.Start(stopCh)
+ coreFactory.Start(stopCh)
+ var controllerWg sync.WaitGroup
+ go ctrl.Run(*threads, stopCh, &controllerWg)
+ <-shutdownHandler
+ controllerWg.Wait()
+ terminate()
+ } else {
+ // run...
+ stopCh := make(chan struct{})
+ factory.Start(stopCh)
+ coreFactory.Start(stopCh)
+ go ctrl.Run(*threads, stopCh, nil)
+
+ // ...until SIGINT
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ <-c
+ close(stopCh)
+ }
}
// start listening & serving http endpoint if set
@@ -289,7 +327,7 @@ func main() {
klog.Infof("Metrics http server successfully started on %s, %s", *httpEndpoint, *metricsPath)
defer func() {
- err := srv.Shutdown(context.Background())
+ err := srv.Shutdown(ctx)
if err != nil {
klog.Errorf("Failed to shutdown metrics server: %s", err.Error())
}
@@ -300,7 +338,7 @@ func main() {
}
if !*leaderElection {
- run(context.TODO())
+ run(ctx)
} else {
lockName := "snapshot-controller-leader"
// Create a new clientset for leader election to prevent throttling
@@ -320,6 +358,11 @@ func main() {
le.WithLeaseDuration(*leaderElectionLeaseDuration)
le.WithRenewDeadline(*leaderElectionRenewDeadline)
le.WithRetryPeriod(*leaderElectionRetryPeriod)
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ le.WithReleaseOnCancel(true)
+ le.WithContext(ctx)
+ }
+
if err := le.Run(); err != nil {
klog.Fatalf("failed to initialize leader election: %v", err)
}
diff --git a/cmd/snapshot-conversion-webhook/Dockerfile b/cmd/snapshot-conversion-webhook/Dockerfile
new file mode 100644
index 000000000..71903474e
--- /dev/null
+++ b/cmd/snapshot-conversion-webhook/Dockerfile
@@ -0,0 +1,7 @@
+FROM gcr.io/distroless/static:latest
+LABEL maintainers="Kubernetes Authors"
+LABEL description="Snapshot Webhook"
+ARG binary=./bin/snapshot-conversion-webhook
+
+COPY ${binary} snapshot-conversion-webhook
+ENTRYPOINT ["/snapshot-conversion-webhook"]
diff --git a/cmd/snapshot-conversion-webhook/main.go b/cmd/snapshot-conversion-webhook/main.go
new file mode 100644
index 000000000..3a40d1ae8
--- /dev/null
+++ b/cmd/snapshot-conversion-webhook/main.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "crypto/tls"
+ "flag"
+
+ "k8s.io/component-base/logs"
+ logsapi "k8s.io/component-base/logs/api/v1"
+ "k8s.io/klog/v2"
+
+ "github.com/kubernetes-csi/csi-lib-utils/standardflags"
+ "github.com/kubernetes-csi/external-snapshotter/v8/pkg/webhook"
+)
+
+var (
+ certFile = flag.String(
+ "tls-cert-file",
+ "",
+ "File containing the x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). Required.",
+ )
+ keyFile = flag.String(
+ "tls-private-key-file",
+ "",
+ "File containing the x509 private key matching --tls-cert-file. Required.",
+ )
+ port = flag.Int(
+ "port",
+ 443,
+ "Secure port that the webhook listens on",
+ )
+)
+
+func main() {
+ c := logsapi.NewLoggingConfiguration()
+ logsapi.AddGoFlags(c, flag.CommandLine)
+ logs.InitLogs()
+ standardflags.AddAutomaxprocs(klog.Infof)
+ flag.Parse()
+
+ klog.Info("Starting conversion webhook server")
+
+ if certFile == nil || *certFile == "" {
+ klog.Fatal("--tls-cert-file must be specified")
+ }
+ if keyFile == nil || *keyFile == "" {
+ klog.Fatal("--tls-private-key-file must be specified")
+ }
+
+ // Create new cert watcher
+ cw, err := webhook.NewCertWatcher(*certFile, *keyFile)
+ if err != nil {
+ klog.Fatalf("failed to initialize new cert watcher: %v", err)
+ }
+ tlsConfig := &tls.Config{
+ GetCertificate: cw.GetCertificate,
+ }
+
+ // Start the webhook server
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel() // stops certwatcher
+
+ if err := webhook.StartServer(ctx, tlsConfig, cw, *port); err != nil {
+ klog.Fatalf("server stopped: %v", err)
+ }
+}
diff --git a/deploy/kubernetes/webhook-example/README.md b/deploy/kubernetes/webhook-example/README.md
new file mode 100644
index 000000000..e9280423f
--- /dev/null
+++ b/deploy/kubernetes/webhook-example/README.md
@@ -0,0 +1,90 @@
+# Conversion Webhook
+
+The snapshot conversion webhook is an HTTP callback which responds to
+[conversion requests](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion),
+allowing the API server to convert VolumeGroupSnapshotContent objects between the v1beta1 and v1beta2 APIs.
+
+The cluster admin or Kubernetes distribution admin should install the webhook
+alongside the snapshot controllers and CRDs.
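+
+Conversion requests and responses follow the standard `ConversionReview` schema used by all
+CRD conversion webhooks. The sketch below is illustrative only (the UID is a placeholder and
+the object bodies are abridged):
+
+```yaml
+# Request sent by the API server (abridged):
+apiVersion: apiextensions.k8s.io/v1
+kind: ConversionReview
+request:
+  uid: "705ab4f5-6393-11e8-b7cc-42010a800002"
+  desiredAPIVersion: groupsnapshot.storage.k8s.io/v1beta2
+  objects:
+    - apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+      kind: VolumeGroupSnapshotContent
+      # ... object to convert ...
+---
+# Response returned by the webhook (abridged):
+apiVersion: apiextensions.k8s.io/v1
+kind: ConversionReview
+response:
+  uid: "705ab4f5-6393-11e8-b7cc-42010a800002"
+  result:
+    status: Success
+  convertedObjects:
+    - apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+      kind: VolumeGroupSnapshotContent
+      # ... converted object ...
+```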
+
+## How to build the webhook
+
+Build the binary
+
+```bash
+make
+```
+
+Build the docker image
+
+```bash
+docker build -t snapshot-conversion-webhook:latest -f ./cmd/snapshot-conversion-webhook/Dockerfile .
+```
+
+## How to deploy the webhook
+
+The webhook server is provided as an image which can be built from this repository. It can be deployed anywhere,
+as long as the API server is able to reach it over HTTPS. It is recommended to deploy the webhook server in the
+cluster, since snapshotting is latency-sensitive.
+
+The CRD may need to be patched to enable secure TLS communication with the webhook server.
+Please see the [documentation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion)
+for more details.
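+
+As a sketch (assuming the service name and namespace from the example deployment below), the
+patched conversion stanza on the CRD would look roughly like this:
+
+```yaml
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        service:
+          name: snapshot-conversion-webhook-service
+          namespace: default
+          path: /convert
+        caBundle: <base64-encoded CA certificate>
+      conversionReviewVersions: ["v1"]
+```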
+
+The webhook server code is adapted from the [webhook server](https://github.com/kubernetes/kubernetes/blob/v1.25.3/test/images/agnhost/crd-conversion-webhook/main.go)
+used in the kubernetes/kubernetes e2e testing code.
+
+### Example in-cluster deployment using Kubernetes Secrets
+
+Please note this is not considered a production-ready way to deploy the certificates and is provided
+for demo purposes only. It is only one of many ways to deploy the certificates; it is your
+responsibility to ensure the security of your cluster.
+
+TLS certificates and private keys should be handled with care, and you may not want to keep them in
+plain Kubernetes Secrets.
+
+#### Method
+
+These commands should be run from the top level directory.
+
+1. Run the `create-cert.sh` script. Note that using the default namespace will allow anyone with access to that namespace to read your secret. It is recommended to change the namespace in all the files and in the commands given below.
+
+
+ ```bash
+ # This script creates a TLS certificate signed by the cluster (see https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/)
+ # and places the certificate and private key into a Secret on the cluster.
+ ./deploy/kubernetes/webhook-example/create-cert.sh --service snapshot-conversion-webhook-service --secret snapshot-conversion-webhook-secret --namespace default # Make sure to use a different namespace
+ ```
+
+2. Patch the VolumeGroupSnapshotContent CRD filling in the CA bundle field.
+
+ ```bash
+ ./deploy/kubernetes/webhook-example/patch-ca-bundle.sh
+ ```
+
+3. Change the namespace in the service and deployment in the `webhook.yaml` file.
+
+4. Create the deployment, service, RBAC, and admission configuration objects on the cluster.
+
+ ```bash
+ kubectl apply -f ./deploy/kubernetes/webhook-example
+ ```
+
+Once all the pods from the deployment are up and running, you should be ready to go.
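+
+To check, you can list the webhook pods (this assumes the `app.kubernetes.io/name` label used by
+the example `webhook.yaml`):
+
+```bash
+kubectl get pods -l app.kubernetes.io/name=snapshot-conversion-webhook
+```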
+
+#### Verify the webhook works
+
+Try to query the API server for VolumeGroupSnapshotContent objects in version `v1beta1`.
+
+```bash
+kubectl get volumegroupsnapshotcontent.v1beta1.groupsnapshot.storage.k8s.io
+```
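+
+If conversion works in both directions, fetching the same objects as `v1beta2` should succeed as well:
+
+```bash
+kubectl get volumegroupsnapshotcontent.v1beta2.groupsnapshot.storage.k8s.io
+```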
+
+### Other methods to deploy the webhook server
+
+Look into [cert-manager](https://cert-manager.io/) to handle the certificates,
+and this kubebuilder [tutorial](https://book.kubebuilder.io/cronjob-tutorial/cert-manager.html) on how to deploy a webhook.
+
+#### Important
+
+Please see the deployment [yaml](./webhook.yaml) for the arguments expected by the
+webhook server. The conversion webhook is served at the path `/convert`.
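+
+For a quick manual probe from inside the cluster, you can POST a `ConversionReview` document to the
+`/convert` path (hypothetical request file, and TLS verification skipped for the test):
+
+```bash
+curl -sk -X POST "https://snapshot-conversion-webhook-service.default.svc:443/convert" \
+  -H "Content-Type: application/json" \
+  -d @conversionreview.json
+```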
diff --git a/deploy/kubernetes/webhook-example/create-cert.sh b/deploy/kubernetes/webhook-example/create-cert.sh
new file mode 100755
index 000000000..68baa8878
--- /dev/null
+++ b/deploy/kubernetes/webhook-example/create-cert.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+set -e
+
+usage() {
+    cat <<EOF
+Generate a certificate suitable for use with the snapshot conversion webhook service.
+
+usage: ${0} [OPTIONS]
+
+The following flags are required:
+       --service          Service name of the webhook.
+       --namespace        Namespace where the webhook service and secret reside.
+       --secret           Secret name for the server certificate/key pair.
+EOF
+    exit 1
+}
+
+while [[ $# -gt 0 ]]; do
+    case ${1} in
+        --service)
+            service="$2"
+            shift
+            ;;
+        --secret)
+            secret="$2"
+            shift
+            ;;
+        --namespace)
+            namespace="$2"
+            shift
+            ;;
+        *)
+            usage
+            ;;
+    esac
+    shift
+done
+
+[ -z "${service}" ] && usage
+[ -z "${secret}" ] && usage
+[ -z "${namespace}" ] && usage
+
+csrName=${service}.${namespace}
+tmpdir=$(mktemp -d)
+echo "creating certs in tmpdir ${tmpdir}"
+
+cat <<EOF >> ${tmpdir}/csr.conf
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = ${service}
+DNS.2 = ${service}.${namespace}
+DNS.3 = ${service}.${namespace}.svc
+EOF
+
+openssl genrsa -out ${tmpdir}/server-key.pem 2048
+openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=system:node:${service}.${namespace}.svc;/O=system:nodes" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf
+
+# clean-up any previously created CSR for our service. Ignore errors if not present.
+kubectl delete csr ${csrName} 2>/dev/null || true
+
+# create server cert/key CSR and send to k8s API
+cat <<EOF | kubectl apply -f -
+apiVersion: certificates.k8s.io/v1
+kind: CertificateSigningRequest
+metadata:
+  name: ${csrName}
+spec:
+  request: $(base64 < ${tmpdir}/server.csr | tr -d '\n')
+  signerName: kubernetes.io/kubelet-serving
+  usages:
+  - digital signature
+  - key encipherment
+  - server auth
+EOF
+
+# approve the CSR and wait for the signed certificate to be issued
+kubectl certificate approve ${csrName}
+for _ in $(seq 10); do
+    serverCert=$(kubectl get csr ${csrName} -o jsonpath='{.status.certificate}')
+    if [[ ${serverCert} != '' ]]; then
+        break
+    fi
+    sleep 1
+done
+if [[ ${serverCert} == '' ]]; then
+    echo "ERROR: the signed certificate for csr ${csrName} was not issued in time" >&2
+    exit 1
+fi
+echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem
+
+
+# create the secret with CA cert and server cert/key
+kubectl create secret tls ${secret} \
+ --key=${tmpdir}/server-key.pem \
+ --cert=${tmpdir}/server-cert.pem \
+ --dry-run=client -o yaml |
+ kubectl -n ${namespace} apply -f -
diff --git a/deploy/kubernetes/webhook-example/patch-ca-bundle.sh b/deploy/kubernetes/webhook-example/patch-ca-bundle.sh
new file mode 100755
index 000000000..9e410c62f
--- /dev/null
+++ b/deploy/kubernetes/webhook-example/patch-ca-bundle.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+ROOT=$(cd $(dirname $0)/../../; pwd)
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+CA_BUNDLE=$( kubectl get secret snapshot-conversion-webhook-secret -o json | jq -r '.data."tls.crt"' )
+JSON_PATCH="{\"spec\":{\"conversion\": {\"webhook\": {\"clientConfig\": {\"caBundle\": \"${CA_BUNDLE}\"}}}}}"
+
+kubectl patch crd volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io -p "${JSON_PATCH}"
diff --git a/deploy/kubernetes/webhook-example/webhook.yaml b/deploy/kubernetes/webhook-example/webhook.yaml
new file mode 100644
index 000000000..5a04beeb2
--- /dev/null
+++ b/deploy/kubernetes/webhook-example/webhook.yaml
@@ -0,0 +1,48 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: snapshot-conversion-webhook-deployment
+ namespace: default # NOTE: change the namespace
+ labels:
+ app.kubernetes.io/name: snapshot-conversion-webhook
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: snapshot-conversion-webhook
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: snapshot-conversion-webhook
+ spec:
+ containers:
+ - name: snapshot-conversion-webhook
+ image: registry.k8s.io/sig-storage/snapshot-conversion-webhook:v8.0.1 # change the image if you wish to use your own custom conversion server image
+ imagePullPolicy: IfNotPresent
+ args:
+ - '--tls-cert-file=/etc/snapshot-conversion-webhook/certs/tls.crt'
+ - '--tls-private-key-file=/etc/snapshot-conversion-webhook/certs/tls.key'
+ ports:
+ - containerPort: 443 # change the port as needed
+ volumeMounts:
+ - name: snapshot-conversion-webhook-certs
+ mountPath: /etc/snapshot-conversion-webhook/certs
+ readOnly: true
+ volumes:
+ - name: snapshot-conversion-webhook-certs
+ secret:
+ secretName: snapshot-conversion-webhook-secret
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: snapshot-conversion-webhook-service
+ namespace: default # NOTE: change the namespace
+spec:
+ selector:
+ app.kubernetes.io/name: snapshot-conversion-webhook
+ ports:
+ - protocol: TCP
+ port: 443 # Change if needed
+ targetPort: 443 # Change if the webserver image expects a different port
diff --git a/go.mod b/go.mod
index 98c98e06b..8ffce4b76 100644
--- a/go.mod
+++ b/go.mod
@@ -2,37 +2,49 @@ module github.com/kubernetes-csi/external-snapshotter/v8
go 1.24.0
-toolchain go1.24.4
+toolchain go1.24.6
require (
- github.com/container-storage-interface/spec v1.11.0
+ github.com/container-storage-interface/spec v1.12.0
github.com/evanphx/json-patch v5.9.0+incompatible
+ github.com/fsnotify/fsnotify v1.9.0
github.com/golang/mock v1.6.0
+ github.com/google/go-cmp v0.7.0
github.com/kubernetes-csi/csi-lib-utils v0.22.0
- github.com/kubernetes-csi/csi-test/v5 v5.3.1
+ github.com/kubernetes-csi/csi-test/v5 v5.4.0
github.com/kubernetes-csi/external-snapshotter/client/v8 v8.2.0
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.62.0
- google.golang.org/grpc v1.69.0
+ google.golang.org/grpc v1.72.1
google.golang.org/protobuf v1.36.5
- k8s.io/api v0.33.1
- k8s.io/apimachinery v0.33.1
- k8s.io/apiserver v0.33.0
- k8s.io/client-go v0.33.1
- k8s.io/component-base v0.33.1
- k8s.io/component-helpers v0.33.0
+ k8s.io/api v0.34.0
+ k8s.io/apiextensions-apiserver v0.34.0
+ k8s.io/apimachinery v0.34.0
+ k8s.io/apiserver v0.34.0
+ k8s.io/client-go v0.34.0
+ k8s.io/component-base v0.34.0
+ k8s.io/component-helpers v0.34.0
k8s.io/klog/v2 v2.130.1
- k8s.io/utils v0.0.0-20241210054802-24370beab758
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
+ sigs.k8s.io/yaml v1.6.0
)
require (
+ cel.dev/expr v0.24.0 // indirect
+ github.com/NYTimes/gziphandler v1.1.1 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
@@ -40,60 +52,125 @@ require (
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/google/gnostic-models v0.6.9 // indirect
- github.com/google/go-cmp v0.7.0 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
+ github.com/google/cel-go v0.26.0 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/spf13/cobra v1.8.1 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/cobra v1.9.1 // indirect
+ github.com/spf13/pflag v1.0.6 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
+ go.etcd.io/etcd/api/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/v3 v3.6.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
- go.opentelemetry.io/otel v1.33.0 // indirect
- go.opentelemetry.io/otel/metric v1.33.0 // indirect
- go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.35.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.35.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.35.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/crypto v0.37.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
+ golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.9.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+ k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
replace github.com/kubernetes-csi/external-snapshotter/client/v8 => ./client
-replace k8s.io/api => k8s.io/api v0.33.0
+replace k8s.io/api => k8s.io/api v0.34.0
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.33.0
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.34.0
-replace k8s.io/apiserver => k8s.io/apiserver v0.33.0
+replace k8s.io/apiserver => k8s.io/apiserver v0.34.0
-replace k8s.io/client-go => k8s.io/client-go v0.33.0
+replace k8s.io/client-go => k8s.io/client-go v0.34.0
-replace k8s.io/code-generator => k8s.io/code-generator v0.33.0
+replace k8s.io/code-generator => k8s.io/code-generator v0.34.0
-replace k8s.io/component-base => k8s.io/component-base v0.33.0
+replace k8s.io/component-base => k8s.io/component-base v0.34.0
-replace k8s.io/component-helpers => k8s.io/component-helpers v0.33.0
+replace k8s.io/component-helpers => k8s.io/component-helpers v0.34.0
-replace k8s.io/kms => k8s.io/kms v0.33.0
+replace k8s.io/kms => k8s.io/kms v0.34.0
+
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.0
+
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.34.0
+
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.34.0
+
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.0
+
+replace k8s.io/controller-manager => k8s.io/controller-manager v0.34.0
+
+replace k8s.io/cri-api => k8s.io/cri-api v0.34.0
+
+replace k8s.io/cri-client => k8s.io/cri-client v0.34.0
+
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.0
+
+replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.0
+
+replace k8s.io/endpointslice => k8s.io/endpointslice v0.34.0
+
+replace k8s.io/externaljwt => k8s.io/externaljwt v0.34.0
+
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.0
+
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.0
+
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.0
+
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.0
+
+replace k8s.io/kubectl => k8s.io/kubectl v0.34.0
+
+replace k8s.io/kubelet => k8s.io/kubelet v0.34.0
+
+replace k8s.io/metrics => k8s.io/metrics v0.34.0
+
+replace k8s.io/mount-utils => k8s.io/mount-utils v0.34.0
+
+replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.34.0
+
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.0
+
+replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.34.0
+
+replace k8s.io/sample-controller => k8s.io/sample-controller v0.34.0
diff --git a/go.sum b/go.sum
index 561f7ae4b..885e1163d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,22 +1,40 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
-github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/container-storage-interface/spec v1.12.0 h1:zrFOEqpR5AghNaaDG4qyedwPBqU2fU0dWjLQMP/azK0=
+github.com/container-storage-interface/spec v1.12.0/go.mod h1:txsm+MA2B2WDa5kW69jNbqPnvTtfvZma7T/zsAZ9qX8=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -30,27 +48,44 @@ github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
-github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
+github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
+github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -65,8 +100,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubernetes-csi/csi-lib-utils v0.22.0 h1:EUAs1+uHGps3OtVj4XVx16urhpI02eu+Z8Vps6plpHY=
github.com/kubernetes-csi/csi-lib-utils v0.22.0/go.mod h1:f+PalKyS4Ujsjb9+m6Rj0W6c28y3nfea3paQ/VqjI28=
-github.com/kubernetes-csi/csi-test/v5 v5.3.1 h1:Wiukp1In+kif+BFo6q2ExjgB+MbrAz4jZWzGfijypuY=
-github.com/kubernetes-csi/csi-test/v5 v5.3.1/go.mod h1:7hA2cSYJ6T8CraEZPA6zqkLZwemjBD54XAnPsPC3VpA=
+github.com/kubernetes-csi/csi-test/v5 v5.4.0 h1:u5DgYNIreSNO2+u4Nq2Wpl+bbakRSjNyxZHmDTAqnYA=
+github.com/kubernetes-csi/csi-test/v5 v5.4.0/go.mod h1:anAJKFUb/SdHhIHECgSKxC5LSiLzib+1I6mrWF5Hve8=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
@@ -74,14 +109,15 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -100,35 +136,72 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
+go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
+go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
+go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
+go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
+go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
+go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
+go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
+go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
+go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
+go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
+go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
-go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
-go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
-go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
-go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
-go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
-go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
-go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
-go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
-go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
-go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
+go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -137,9 +210,17 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -156,6 +237,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -178,16 +261,18 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
-golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
-google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
-google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
+google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -197,32 +282,38 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
-k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
-k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
-k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
-k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
-k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
-k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
-k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
-k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
-k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
-k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
+k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
+k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
+k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc=
+k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0=
+k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
+k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
+k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
+k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
+k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
+k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
+k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
+k8s.io/component-helpers v0.34.0 h1:5T7P9XGMoUy1JDNKzHf0p/upYbeUf8ZaSf9jbx0QlIo=
+k8s.io/component-helpers v0.34.0/go.mod h1:kaOyl5tdtnymriYcVZg4uwDBe2d1wlIpXyDkt6sVnt4=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
-k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
-k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
-k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/pkg/common-controller/framework_test.go b/pkg/common-controller/framework_test.go
index cd45808bc..ecde1c100 100644
--- a/pkg/common-controller/framework_test.go
+++ b/pkg/common-controller/framework_test.go
@@ -34,13 +34,14 @@ import (
"time"
jsonpatch "github.com/evanphx/json-patch"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ "github.com/google/go-cmp/cmp"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
"github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake"
snapshotscheme "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/scheme"
informers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions"
- groupstoragelisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta1"
+ groupstoragelisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
storagelisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumesnapshot/v1"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/metrics"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
@@ -50,7 +51,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
coreinformers "k8s.io/client-go/informers"
@@ -102,13 +102,13 @@ type controllerTest struct {
// Expected content of controller snapshot cache at the end of the test.
expectedSnapshots []*crdv1.VolumeSnapshot
	// Initial content of controller group snapshot cache.
- initialGroupSnapshots []*crdv1beta1.VolumeGroupSnapshot
+ initialGroupSnapshots []*crdv1beta2.VolumeGroupSnapshot
	// Expected content of controller group snapshot cache at the end of the test.
- expectedGroupSnapshots []*crdv1beta1.VolumeGroupSnapshot
+ expectedGroupSnapshots []*crdv1beta2.VolumeGroupSnapshot
	// Initial content of controller group content cache.
- initialGroupContents []*crdv1beta1.VolumeGroupSnapshotContent
+ initialGroupContents []*crdv1beta2.VolumeGroupSnapshotContent
	// Expected content of controller group content cache at the end of the test.
- expectedGroupContents []*crdv1beta1.VolumeGroupSnapshotContent
+ expectedGroupContents []*crdv1beta2.VolumeGroupSnapshotContent
// Initial content of controller volume cache.
initialVolumes []*v1.PersistentVolume
// Initial content of controller claim cache.
@@ -135,9 +135,9 @@ const (
var (
errVersionConflict = errors.New("VersionError")
nocontents []*crdv1.VolumeSnapshotContent
- nogroupcontents []*crdv1beta1.VolumeGroupSnapshotContent
+ nogroupcontents []*crdv1beta2.VolumeGroupSnapshotContent
nosnapshots []*crdv1.VolumeSnapshot
- nogroupsnapshots []*crdv1beta1.VolumeGroupSnapshot
+ nogroupsnapshots []*crdv1beta2.VolumeGroupSnapshot
noevents = []string{}
noerrors = []reactorError{}
)
@@ -166,9 +166,9 @@ type snapshotReactor struct {
contents map[string]*crdv1.VolumeSnapshotContent
snapshots map[string]*crdv1.VolumeSnapshot
snapshotClasses map[string]*crdv1.VolumeSnapshotClass
- groupContents map[string]*crdv1beta1.VolumeGroupSnapshotContent
- groupSnapshots map[string]*crdv1beta1.VolumeGroupSnapshot
- groupSnapshotClasses map[string]*crdv1beta1.VolumeGroupSnapshotClass
+ groupContents map[string]*crdv1beta2.VolumeGroupSnapshotContent
+ groupSnapshots map[string]*crdv1beta2.VolumeGroupSnapshot
+ groupSnapshotClasses map[string]*crdv1beta2.VolumeGroupSnapshotClass
changedObjects []interface{}
changedSinceLastSync int
ctrl *csiSnapshotCommonController
@@ -244,7 +244,7 @@ func withSnapshotFinalizers(snapshots []*crdv1.VolumeSnapshot, finalizers ...str
return snapshots
}
-func withGroupSnapshotFinalizers(groupSnapshots []*crdv1beta1.VolumeGroupSnapshot, finalizers ...string) []*crdv1beta1.VolumeGroupSnapshot {
+func withGroupSnapshotFinalizers(groupSnapshots []*crdv1beta2.VolumeGroupSnapshot, finalizers ...string) []*crdv1beta2.VolumeGroupSnapshot {
for i := range groupSnapshots {
for _, f := range finalizers {
groupSnapshots[i].ObjectMeta.Finalizers = append(groupSnapshots[i].ObjectMeta.Finalizers, f)
@@ -300,7 +300,7 @@ func (r *snapshotReactor) React(action core.Action) (handled bool, ret runtime.O
case action.Matches("create", "volumegroupsnapshotcontents"):
obj := action.(core.UpdateAction).GetObject()
- content := obj.(*crdv1beta1.VolumeGroupSnapshotContent)
+ content := obj.(*crdv1beta2.VolumeGroupSnapshotContent)
// check the content does not exist
_, found := r.contents[content.Name]
@@ -343,7 +343,7 @@ func (r *snapshotReactor) React(action core.Action) (handled bool, ret runtime.O
case action.Matches("update", "volumegroupsnapshotcontents"):
obj := action.(core.UpdateAction).GetObject()
- content := obj.(*crdv1beta1.VolumeGroupSnapshotContent)
+ content := obj.(*crdv1beta2.VolumeGroupSnapshotContent)
// Check and bump object version
storedVolume, found := r.contents[content.Name]
@@ -408,7 +408,7 @@ func (r *snapshotReactor) React(action core.Action) (handled bool, ret runtime.O
return true, content, nil
case action.Matches("patch", "volumegroupsnapshotcontents"):
- content := &crdv1beta1.VolumeGroupSnapshotContent{}
+ content := &crdv1beta2.VolumeGroupSnapshotContent{}
action := action.(core.PatchAction)
// Check and bump object version
@@ -475,7 +475,7 @@ func (r *snapshotReactor) React(action core.Action) (handled bool, ret runtime.O
case action.Matches("update", "volumegroupsnapshots"):
obj := action.(core.UpdateAction).GetObject()
- groupSnapshot := obj.(*crdv1beta1.VolumeGroupSnapshot)
+ groupSnapshot := obj.(*crdv1beta2.VolumeGroupSnapshot)
// Check and bump object version
storedGroupSnapshot, found := r.groupSnapshots[groupSnapshot.Name]
@@ -789,19 +789,19 @@ func (r *snapshotReactor) checkContents(expectedContents []*crdv1.VolumeSnapshot
if !reflect.DeepEqual(expectedMap, gotMap) {
// Print ugly but useful diff of expected and received objects for
// easier debugging.
- return fmt.Errorf("content check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
+ return fmt.Errorf("content check failed [A-expected, B-got]: %s", cmp.Diff(expectedMap, gotMap))
}
return nil
}
// checkGroupContents compares all expectedGroupContents with the set of contents at the end of
// the test and reports differences.
-func (r *snapshotReactor) checkGroupContents(expectedGroupContents []*crdv1beta1.VolumeGroupSnapshotContent) error {
+func (r *snapshotReactor) checkGroupContents(expectedGroupContents []*crdv1beta2.VolumeGroupSnapshotContent) error {
r.lock.Lock()
defer r.lock.Unlock()
- expectedMap := make(map[string]*crdv1beta1.VolumeGroupSnapshotContent)
- gotMap := make(map[string]*crdv1beta1.VolumeGroupSnapshotContent)
+ expectedMap := make(map[string]*crdv1beta2.VolumeGroupSnapshotContent)
+ gotMap := make(map[string]*crdv1beta2.VolumeGroupSnapshotContent)
// Clear any ResourceVersion from both sets
for _, v := range expectedGroupContents {
// Don't modify the existing object
@@ -828,7 +828,7 @@ func (r *snapshotReactor) checkGroupContents(expectedGroupContents []*crdv1beta1
if !reflect.DeepEqual(expectedMap, gotMap) {
// Print ugly but useful diff of expected and received objects for
// easier debugging.
- return fmt.Errorf("content check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
+ return fmt.Errorf("content check failed [A-expected, B-got]: %s", cmp.Diff(expectedMap, gotMap))
}
return nil
}
@@ -863,19 +863,19 @@ func (r *snapshotReactor) checkSnapshots(expectedSnapshots []*crdv1.VolumeSnapsh
if !reflect.DeepEqual(expectedMap, gotMap) {
// Print ugly but useful diff of expected and received objects for
// easier debugging.
- return fmt.Errorf("snapshot check failed [A-expected, B-got result]: %s", diff.ObjectDiff(expectedMap, gotMap))
+ return fmt.Errorf("snapshot check failed [A-expected, B-got result]: %s", cmp.Diff(expectedMap, gotMap))
}
return nil
}
// checkGroupSnapshots compares all expectedGroupSnapshots with the set of snapshots at the end of
// the test and reports differences.
-func (r *snapshotReactor) checkGroupSnapshots(expectedGroupSnapshots []*crdv1beta1.VolumeGroupSnapshot) error {
+func (r *snapshotReactor) checkGroupSnapshots(expectedGroupSnapshots []*crdv1beta2.VolumeGroupSnapshot) error {
r.lock.Lock()
defer r.lock.Unlock()
- expectedMap := make(map[string]*crdv1beta1.VolumeGroupSnapshot)
- gotMap := make(map[string]*crdv1beta1.VolumeGroupSnapshot)
+ expectedMap := make(map[string]*crdv1beta2.VolumeGroupSnapshot)
+ gotMap := make(map[string]*crdv1beta2.VolumeGroupSnapshot)
for _, c := range expectedGroupSnapshots {
// Don't modify the existing object
c = c.DeepCopy()
@@ -898,7 +898,7 @@ func (r *snapshotReactor) checkGroupSnapshots(expectedGroupSnapshots []*crdv1bet
if !reflect.DeepEqual(expectedMap, gotMap) {
// Print ugly but useful diff of expected and received objects for
// easier debugging.
- return fmt.Errorf("snapshot check failed [A-expected, B-got result]: %s", diff.ObjectDiff(expectedMap, gotMap))
+ return fmt.Errorf("snapshot check failed [A-expected, B-got result]: %s", cmp.Diff(expectedMap, gotMap))
}
return nil
}
@@ -1130,9 +1130,9 @@ func newSnapshotReactor(kubeClient *kubefake.Clientset, client *fake.Clientset,
snapshotClasses: make(map[string]*crdv1.VolumeSnapshotClass),
contents: make(map[string]*crdv1.VolumeSnapshotContent),
snapshots: make(map[string]*crdv1.VolumeSnapshot),
- groupSnapshotClasses: make(map[string]*crdv1beta1.VolumeGroupSnapshotClass),
- groupContents: make(map[string]*crdv1beta1.VolumeGroupSnapshotContent),
- groupSnapshots: make(map[string]*crdv1beta1.VolumeGroupSnapshot),
+ groupSnapshotClasses: make(map[string]*crdv1beta2.VolumeGroupSnapshotClass),
+ groupContents: make(map[string]*crdv1beta2.VolumeGroupSnapshotContent),
+ groupSnapshots: make(map[string]*crdv1beta2.VolumeGroupSnapshot),
ctrl: ctrl,
fakeContentWatch: fakeVolumeWatch,
fakeSnapshotWatch: fakeClaimWatch,
@@ -1197,9 +1197,9 @@ func newTestController(kubeClient kubernetes.Interface, clientset clientset.Inte
informerFactory.Snapshot().V1().VolumeSnapshots(),
informerFactory.Snapshot().V1().VolumeSnapshotContents(),
informerFactory.Snapshot().V1().VolumeSnapshotClasses(),
- informerFactory.Groupsnapshot().V1beta1().VolumeGroupSnapshots(),
- informerFactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotContents(),
- informerFactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotClasses(),
+ informerFactory.Groupsnapshot().V1beta2().VolumeGroupSnapshots(),
+ informerFactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotContents(),
+ informerFactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotClasses(),
coreFactory.Core().V1().PersistentVolumeClaims(),
coreFactory.Core().V1().PersistentVolumes(),
nil,
@@ -1281,21 +1281,21 @@ func newContent(contentName, boundToSnapshotUID, boundToSnapshotName, snapshotHa
func newGroupSnapshotContent(groupSnapshotContentName, boundToGroupSnapshotUID, boundToGroupSnapshotName, groupSnapshotHandle, groupSnapshotClassName string, desiredVolumeHandles []string, targetVolumeGroupSnapshotHandle string,
deletionPolicy crdv1.DeletionPolicy, creationTime *metav1.Time,
- withFinalizer bool, withStatus bool) *crdv1beta1.VolumeGroupSnapshotContent {
+ withFinalizer bool, withStatus bool) *crdv1beta2.VolumeGroupSnapshotContent {
ready := true
- content := crdv1beta1.VolumeGroupSnapshotContent{
+ content := crdv1beta2.VolumeGroupSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: groupSnapshotContentName,
ResourceVersion: "1",
},
- Spec: crdv1beta1.VolumeGroupSnapshotContentSpec{
+ Spec: crdv1beta2.VolumeGroupSnapshotContentSpec{
Driver: mockDriverName,
DeletionPolicy: deletionPolicy,
},
}
if withStatus {
- content.Status = &crdv1beta1.VolumeGroupSnapshotContentStatus{
+ content.Status = &crdv1beta2.VolumeGroupSnapshotContentStatus{
CreationTime: creationTime,
ReadyToUse: &ready,
}
@@ -1310,7 +1310,7 @@ func newGroupSnapshotContent(groupSnapshotContentName, boundToGroupSnapshotUID,
}
if targetVolumeGroupSnapshotHandle != "" {
- content.Spec.Source.GroupSnapshotHandles = &crdv1beta1.GroupSnapshotHandles{
+ content.Spec.Source.GroupSnapshotHandles = &crdv1beta2.GroupSnapshotHandles{
VolumeGroupSnapshotHandle: targetVolumeGroupSnapshotHandle,
}
}
@@ -1322,7 +1322,7 @@ func newGroupSnapshotContent(groupSnapshotContentName, boundToGroupSnapshotUID,
if boundToGroupSnapshotName != "" {
content.Spec.VolumeGroupSnapshotRef = v1.ObjectReference{
Kind: "VolumeGroupSnapshot",
- APIVersion: "groupsnapshot.storage.k8s.io/v1beta1",
+ APIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
UID: types.UID(boundToGroupSnapshotUID),
Namespace: testNamespace,
Name: boundToGroupSnapshotName,
@@ -1338,8 +1338,8 @@ func newGroupSnapshotContent(groupSnapshotContentName, boundToGroupSnapshotUID,
func newGroupSnapshotContentArray(groupSnapshotContentName, boundToGroupSnapshotUID, boundToGroupSnapshotSnapshotName, groupSnapshotHandle, groupSnapshotClassName string, desiredVolumeHandles []string, volumeGroupHandle string,
deletionPolicy crdv1.DeletionPolicy, creationTime *metav1.Time,
- withFinalizer bool, withStatus bool) []*crdv1beta1.VolumeGroupSnapshotContent {
- return []*crdv1beta1.VolumeGroupSnapshotContent{
+ withFinalizer bool, withStatus bool) []*crdv1beta2.VolumeGroupSnapshotContent {
+ return []*crdv1beta2.VolumeGroupSnapshotContent{
newGroupSnapshotContent(groupSnapshotContentName, boundToGroupSnapshotUID, boundToGroupSnapshotSnapshotName, groupSnapshotHandle, groupSnapshotClassName, desiredVolumeHandles, volumeGroupHandle,
deletionPolicy, creationTime,
withFinalizer, withStatus),
@@ -1370,12 +1370,12 @@ func withContentFinalizer(content *crdv1.VolumeSnapshotContent) *crdv1.VolumeSna
return content
}
-func withGroupContentFinalizer(content *crdv1beta1.VolumeGroupSnapshotContent) *crdv1beta1.VolumeGroupSnapshotContent {
+func withGroupContentFinalizer(content *crdv1beta2.VolumeGroupSnapshotContent) *crdv1beta2.VolumeGroupSnapshotContent {
content.ObjectMeta.Finalizers = append(content.ObjectMeta.Finalizers, utils.VolumeGroupSnapshotContentFinalizer)
return content
}
-func withGroupContentAnnotations(contents []*crdv1beta1.VolumeGroupSnapshotContent, annotations map[string]string) []*crdv1beta1.VolumeGroupSnapshotContent {
+func withGroupContentAnnotations(contents []*crdv1beta2.VolumeGroupSnapshotContent, annotations map[string]string) []*crdv1beta2.VolumeGroupSnapshotContent {
for i := range contents {
if contents[i].ObjectMeta.Annotations == nil {
contents[i].ObjectMeta.Annotations = make(map[string]string)
@@ -1485,17 +1485,17 @@ func newSnapshot(
func newGroupSnapshot(
groupSnapshotName, groupSnapshotUID string, selectors map[string]string, targetContentName, groupSnapshotClassName, boundContentName string,
readyToUse *bool, creationTime *metav1.Time,
- err *crdv1.VolumeSnapshotError, nilStatus bool, withAllFinalizers bool, deletionTimestamp *metav1.Time) *crdv1beta1.VolumeGroupSnapshot {
- groupSnapshot := crdv1beta1.VolumeGroupSnapshot{
+ err *crdv1.VolumeSnapshotError, nilStatus bool, withAllFinalizers bool, deletionTimestamp *metav1.Time) *crdv1beta2.VolumeGroupSnapshot {
+ groupSnapshot := crdv1beta2.VolumeGroupSnapshot{
ObjectMeta: metav1.ObjectMeta{
Name: groupSnapshotName,
Namespace: testNamespace,
UID: types.UID(groupSnapshotUID),
ResourceVersion: "1",
- SelfLink: "/apis/groupsnapshot.storage.k8s.io/v1beta1/namespaces/" + testNamespace + "/volumesnapshots/" + groupSnapshotName,
+ SelfLink: "/apis/groupsnapshot.storage.k8s.io/v1beta2/namespaces/" + testNamespace + "/volumesnapshots/" + groupSnapshotName,
DeletionTimestamp: deletionTimestamp,
},
- Spec: crdv1beta1.VolumeGroupSnapshotSpec{
+ Spec: crdv1beta2.VolumeGroupSnapshotSpec{
VolumeGroupSnapshotClassName: nil,
},
}
@@ -1507,7 +1507,7 @@ func newGroupSnapshot(
}
if !nilStatus {
- groupSnapshot.Status = &crdv1beta1.VolumeGroupSnapshotStatus{
+ groupSnapshot.Status = &crdv1beta2.VolumeGroupSnapshotStatus{
CreationTime: creationTime,
ReadyToUse: readyToUse,
Error: err,
@@ -1526,7 +1526,7 @@ func newGroupSnapshot(
groupSnapshot.Spec.Source.VolumeGroupSnapshotContentName = &targetContentName
}
if withAllFinalizers {
- return withGroupSnapshotFinalizers([]*crdv1beta1.VolumeGroupSnapshot{&groupSnapshot}, utils.VolumeGroupSnapshotContentFinalizer, utils.VolumeGroupSnapshotBoundFinalizer)[0]
+ return withGroupSnapshotFinalizers([]*crdv1beta2.VolumeGroupSnapshot{&groupSnapshot}, utils.VolumeGroupSnapshotContentFinalizer, utils.VolumeGroupSnapshotBoundFinalizer)[0]
}
return &groupSnapshot
}
@@ -1534,8 +1534,8 @@ func newGroupSnapshot(
func newGroupSnapshotArray(
groupSnapshotName, groupSnapshotUID string, selectors map[string]string, targetContentName, groupSnapshotClassName, boundContentName string,
readyToUse *bool, creationTime *metav1.Time,
- err *crdv1.VolumeSnapshotError, nilStatus bool, withAllFinalizers bool, deletionTimestamp *metav1.Time) []*crdv1beta1.VolumeGroupSnapshot {
- return []*crdv1beta1.VolumeGroupSnapshot{
+ err *crdv1.VolumeSnapshotError, nilStatus bool, withAllFinalizers bool, deletionTimestamp *metav1.Time) []*crdv1beta2.VolumeGroupSnapshot {
+ return []*crdv1beta2.VolumeGroupSnapshot{
newGroupSnapshot(groupSnapshotName, groupSnapshotUID, selectors, targetContentName, groupSnapshotClassName, boundContentName, readyToUse, creationTime, err, nilStatus, withAllFinalizers, deletionTimestamp),
}
}
@@ -1762,7 +1762,7 @@ func testUpdateSnapshotErrorStatus(ctrl *csiSnapshotCommonController, reactor *s
return fmt.Errorf("update snapshot status failed: expected: %v but got nil", expected)
}
if expected != nil && got != nil && !reflect.DeepEqual(expected, got) {
- return fmt.Errorf("update snapshot status failed [A-expected, B-got]: %s", diff.ObjectDiff(expected, got))
+ return fmt.Errorf("update snapshot status failed [A-expected, B-got]: %s", cmp.Diff(expected, got))
}
return nil
}
@@ -1890,7 +1890,7 @@ func evaluateTestResults(ctrl *csiSnapshotCommonController, reactor *snapshotRea
// 2. Call the tested function (syncSnapshot/syncContent) via
// controllerTest.testCall *once*.
// 3. Compare resulting contents and snapshots with expected contents and snapshots.
-func runSyncTests(t *testing.T, tests []controllerTest, snapshotClasses []*crdv1.VolumeSnapshotClass, groupSnapshotClasses []*crdv1beta1.VolumeGroupSnapshotClass) {
+func runSyncTests(t *testing.T, tests []controllerTest, snapshotClasses []*crdv1.VolumeSnapshotClass, groupSnapshotClasses []*crdv1beta2.VolumeGroupSnapshotClass) {
snapshotscheme.AddToScheme(scheme.Scheme)
for _, test := range tests {
klog.V(4).Infof("starting test %q", test.name)
diff --git a/pkg/common-controller/groupsnapshot_controller_helper.go b/pkg/common-controller/groupsnapshot_controller_helper.go
index c6f91ed4b..8b97dc4a0 100644
--- a/pkg/common-controller/groupsnapshot_controller_helper.go
+++ b/pkg/common-controller/groupsnapshot_controller_helper.go
@@ -32,7 +32,7 @@ import (
ref "k8s.io/client-go/tools/reference"
klog "k8s.io/klog/v2"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/metrics"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
@@ -47,7 +47,7 @@ func (ctrl *csiSnapshotCommonController) storeGroupSnapshotContentUpdate(groupsn
}
// getGroupSnapshotClass is a helper function to get group snapshot class from the group snapshot class name.
-func (ctrl *csiSnapshotCommonController) getGroupSnapshotClass(className string) (*crdv1beta1.VolumeGroupSnapshotClass, error) {
+func (ctrl *csiSnapshotCommonController) getGroupSnapshotClass(className string) (*crdv1beta2.VolumeGroupSnapshotClass, error) {
klog.V(5).Infof("getGroupSnapshotClass: VolumeGroupSnapshotClassName [%s]", className)
groupSnapshotClass, err := ctrl.groupSnapshotClassLister.Get(className)
@@ -72,7 +72,7 @@ func (ctrl *csiSnapshotCommonController) getGroupSnapshotClass(className string)
// if true, ReadyToUse will be set to false;
// otherwise, ReadyToUse will not be changed.
// - eventtype, reason, message - event to send, see EventRecorder.Event()
-func (ctrl *csiSnapshotCommonController) updateGroupSnapshotErrorStatusWithEvent(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, setReadyToFalse bool, eventtype, reason, message string) error {
+func (ctrl *csiSnapshotCommonController) updateGroupSnapshotErrorStatusWithEvent(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, setReadyToFalse bool, eventtype, reason, message string) error {
klog.V(5).Infof("updateGroupSnapshotErrorStatusWithEvent[%s]", utils.GroupSnapshotKey(groupSnapshot))
if groupSnapshot.Status != nil && groupSnapshot.Status.Error != nil && *groupSnapshot.Status.Error.Message == message {
@@ -81,7 +81,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotErrorStatusWithEvent
}
groupSnapshotClone := groupSnapshot.DeepCopy()
if groupSnapshotClone.Status == nil {
- groupSnapshotClone.Status = &crdv1beta1.VolumeGroupSnapshotStatus{}
+ groupSnapshotClone.Status = &crdv1beta2.VolumeGroupSnapshotStatus{}
}
statusError := &crdv1.VolumeSnapshotError{
Time: &metav1.Time{
@@ -95,7 +95,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotErrorStatusWithEvent
ready := false
groupSnapshotClone.Status.ReadyToUse = &ready
}
- newSnapshot, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshotClone.Namespace).UpdateStatus(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
+ newSnapshot, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshotClone.Namespace).UpdateStatus(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
// Emit the event even if the status update fails so that user can see the error
ctrl.eventRecorder.Event(newSnapshot, eventtype, reason, message)
@@ -120,7 +120,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotErrorStatusWithEvent
// For dynamic provisioning, it gets the default GroupSnapshotClasses in the
// system, if any exist (there could be multiple), and finds the one with the
// same CSI Driver as a PV from which a group snapshot will be taken.
-func (ctrl *csiSnapshotCommonController) SetDefaultGroupSnapshotClass(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshotClass, *crdv1beta1.VolumeGroupSnapshot, error) {
+func (ctrl *csiSnapshotCommonController) SetDefaultGroupSnapshotClass(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshotClass, *crdv1beta2.VolumeGroupSnapshot, error) {
klog.V(5).Infof("SetDefaultGroupSnapshotClass for group snapshot [%s]", groupSnapshot.Name)
if groupSnapshot.Spec.Source.VolumeGroupSnapshotContentName != nil {
@@ -141,7 +141,7 @@ func (ctrl *csiSnapshotCommonController) SetDefaultGroupSnapshotClass(groupSnaps
return nil, groupSnapshot, err
}
- defaultClasses := []*crdv1beta1.VolumeGroupSnapshotClass{}
+ defaultClasses := []*crdv1beta2.VolumeGroupSnapshotClass{}
for _, groupSnapshotClass := range list {
if utils.IsVolumeGroupSnapshotClassDefaultAnnotation(groupSnapshotClass.ObjectMeta) && pvDriver == groupSnapshotClass.Driver {
defaultClasses = append(defaultClasses, groupSnapshotClass)
@@ -158,14 +158,14 @@ func (ctrl *csiSnapshotCommonController) SetDefaultGroupSnapshotClass(groupSnaps
klog.V(5).Infof("setDefaultGroupSnapshotClass [%s]: default VolumeGroupSnapshotClassName [%s]", groupSnapshot.Name, defaultClasses[0].Name)
groupSnapshotClone := groupSnapshot.DeepCopy()
groupSnapshotClone.Spec.VolumeGroupSnapshotClassName = &(defaultClasses[0].Name)
- newGroupSnapshot, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
+ newGroupSnapshot, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
if err != nil {
klog.V(4).Infof("updating VolumeGroupSnapshot[%s] default group snapshot class failed %v", utils.GroupSnapshotKey(groupSnapshot), err)
}
_, updateErr := ctrl.storeGroupSnapshotUpdate(newGroupSnapshot)
if updateErr != nil {
		// We will get a "group snapshot update" event soon; this is not a big error
- klog.V(4).Infof("setDefaultSnapshotClass [%s]: cannot update internal cache: %v", utils.GroupSnapshotKey(groupSnapshot), updateErr)
+ klog.V(4).Infof("setDefaultGroupSnapshotClass [%s]: cannot update internal cache: %v", utils.GroupSnapshotKey(groupSnapshot), updateErr)
}
return defaultClasses[0], newGroupSnapshot, nil
@@ -175,7 +175,7 @@ func (ctrl *csiSnapshotCommonController) SetDefaultGroupSnapshotClass(groupSnaps
// It looks up every PVC from which the group snapshot is specified to be created, and looks for the PVC's
// corresponding PV. Bi-directional binding will be verified between PVC and PV before the PV's CSI driver is returned.
// For a non-CSI volume, it returns an error immediately as it's not supported.
-func (ctrl *csiSnapshotCommonController) pvDriverFromGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (string, error) {
+func (ctrl *csiSnapshotCommonController) pvDriverFromGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (string, error) {
pvs, err := ctrl.getVolumesFromVolumeGroupSnapshot(groupSnapshot)
if err != nil {
return "", err
@@ -188,7 +188,7 @@ func (ctrl *csiSnapshotCommonController) pvDriverFromGroupSnapshot(groupSnapshot
}
// getVolumesFromVolumeGroupSnapshot returns the list of PersistentVolume from a VolumeGroupSnapshot.
-func (ctrl *csiSnapshotCommonController) getVolumesFromVolumeGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) ([]*v1.PersistentVolume, error) {
+func (ctrl *csiSnapshotCommonController) getVolumesFromVolumeGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) ([]*v1.PersistentVolume, error) {
var pvReturnList []*v1.PersistentVolume
pvcs, err := ctrl.getClaimsFromVolumeGroupSnapshot(groupSnapshot)
if err != nil {
@@ -219,7 +219,7 @@ func (ctrl *csiSnapshotCommonController) getVolumesFromVolumeGroupSnapshot(group
}
// getClaimsFromVolumeGroupSnapshot is a helper function to get a list of PVCs from VolumeGroupSnapshot.
-func (ctrl *csiSnapshotCommonController) getClaimsFromVolumeGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) ([]v1.PersistentVolumeClaim, error) {
+func (ctrl *csiSnapshotCommonController) getClaimsFromVolumeGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) ([]v1.PersistentVolumeClaim, error) {
labelSelector := groupSnapshot.Spec.Source.Selector
// Get PVC that has group snapshot label applied.
@@ -235,7 +235,7 @@ func (ctrl *csiSnapshotCommonController) getClaimsFromVolumeGroupSnapshot(groupS
// updateGroupSnapshot runs in a worker thread and handles "groupsnapshot added",
// "groupsnapshot updated" and "periodic sync" events.
-func (ctrl *csiSnapshotCommonController) updateGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) updateGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
// Store the new group snapshot version in the cache and do not process it
// if this is an old version.
klog.V(5).Infof("updateGroupSnapshot %q", utils.GroupSnapshotKey(groupSnapshot))
@@ -262,7 +262,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshot(ctx context.Context
}
// deleteGroupSnapshot runs in a worker thread and handles the "groupsnapshot deleted" event.
-func (ctrl *csiSnapshotCommonController) deleteGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) {
+func (ctrl *csiSnapshotCommonController) deleteGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) {
_ = ctrl.groupSnapshotStore.Delete(groupSnapshot)
klog.V(4).Infof("group snapshot %q deleted", utils.GroupSnapshotKey(groupSnapshot))
@@ -295,7 +295,7 @@ func (ctrl *csiSnapshotCommonController) deleteGroupSnapshot(groupSnapshot *crdv
// a group snapshot is created, updated or periodically synced. We do not
// differentiate between these events.
// For easier readability, it is split into syncUnreadyGroupSnapshot and syncReadyGroupSnapshot
-func (ctrl *csiSnapshotCommonController) syncGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) syncGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
klog.V(5).Infof("synchronizing VolumeGroupSnapshot[%s]", utils.GroupSnapshotKey(groupSnapshot))
klog.V(5).Infof("syncGroupSnapshot [%s]: check if we should remove finalizer on group snapshot PVC source and remove it if we can", utils.GroupSnapshotKey(groupSnapshot))
@@ -336,7 +336,7 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshot(ctx context.Context,
// snapshot content successfully before.
// If there is any problem with the binding (e.g., group snapshot points to a
// non-existent group snapshot content), update the group snapshot status and emit event.
-func (ctrl *csiSnapshotCommonController) syncReadyGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) syncReadyGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
if !utils.IsBoundVolumeGroupSnapshotContentNameSet(groupSnapshot) {
return fmt.Errorf("group snapshot %s is not bound to a group snapshot content", utils.GroupSnapshotKey(groupSnapshot))
}
@@ -364,7 +364,7 @@ func (ctrl *csiSnapshotCommonController) syncReadyGroupSnapshot(groupSnapshot *c
// snapshot content cache store by name.
// Note that if no VolumeGroupSnapshotContent exists in the cache store and no error is
// encountered, it returns (nil, nil)
-func (ctrl *csiSnapshotCommonController) getGroupSnapshotContentFromStore(contentName string) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) getGroupSnapshotContentFromStore(contentName string) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
obj, exist, err := ctrl.groupSnapshotContentStore.GetByKey(contentName)
if err != nil {
// should never reach here based on implementation at:
@@ -375,7 +375,7 @@ func (ctrl *csiSnapshotCommonController) getGroupSnapshotContentFromStore(conten
// not able to find a matching group snapshot content
return nil, nil
}
- groupSnapshotContent, ok := obj.(*crdv1beta1.VolumeGroupSnapshotContent)
+ groupSnapshotContent, ok := obj.(*crdv1beta2.VolumeGroupSnapshotContent)
if !ok {
return nil, fmt.Errorf("expected VolumeGroupSnapshotContent, got %+v", obj)
}
@@ -384,7 +384,7 @@ func (ctrl *csiSnapshotCommonController) getGroupSnapshotContentFromStore(conten
// syncUnreadyGroupSnapshot is the main controller method to decide what to do
// with a group snapshot which is not set to ready.
-func (ctrl *csiSnapshotCommonController) syncUnreadyGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) syncUnreadyGroupSnapshot(ctx context.Context, groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
uniqueGroupSnapshotName := utils.GroupSnapshotKey(groupSnapshot)
klog.V(5).Infof("syncUnreadyGroupSnapshot %s", uniqueGroupSnapshotName)
driverName, err := ctrl.getGroupSnapshotDriverName(groupSnapshot)
@@ -478,7 +478,7 @@ func (ctrl *csiSnapshotCommonController) syncUnreadyGroupSnapshot(ctx context.Co
}
	// If we reach here, it is a dynamically provisioned group snapshot, and the VolumeGroupSnapshotContent object is not yet created.
- var groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent
+ var groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent
if groupSnapshotContent, err = ctrl.createGroupSnapshotContent(groupSnapshot); err != nil {
ctrl.updateGroupSnapshotErrorStatusWithEvent(groupSnapshot, true, v1.EventTypeWarning, "GroupSnapshotContentCreationFailed", fmt.Sprintf("failed to create group snapshot content with error %v", err))
return err
@@ -496,12 +496,12 @@ func (ctrl *csiSnapshotCommonController) syncUnreadyGroupSnapshot(ctx context.Co
func (ctrl *csiSnapshotCommonController) createSnapshotsForGroupSnapshotContent(
ctx context.Context,
- groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent,
- groupSnapshot *crdv1beta1.VolumeGroupSnapshot,
-) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+ groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent,
+ groupSnapshot *crdv1beta2.VolumeGroupSnapshot,
+) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
// No status is present, or no volume snapshot was provisioned.
// Let's wait for the snapshotter sidecar to fill it.
- if groupSnapshotContent.Status == nil || len(groupSnapshotContent.Status.VolumeSnapshotHandlePairList) == 0 {
+ if groupSnapshotContent.Status == nil || len(groupSnapshotContent.Status.VolumeSnapshotInfoList) == 0 {
return groupSnapshotContent, nil
}
@@ -540,7 +540,7 @@ func (ctrl *csiSnapshotCommonController) createSnapshotsForGroupSnapshotContent(
"createSnapshotsForGroupSnapshotContent[%s]: creating volumesnapshots and volumesnapshotcontent for group snapshot content",
groupSnapshotContent.Name)
- for _, snapshot := range groupSnapshotContent.Status.VolumeSnapshotHandlePairList {
+ for _, snapshot := range groupSnapshotContent.Status.VolumeSnapshotInfoList {
snapshotHandle := snapshot.SnapshotHandle
volumeHandle := snapshot.VolumeHandle
@@ -580,8 +580,9 @@ func (ctrl *csiSnapshotCommonController) createSnapshotsForGroupSnapshotContent(
VolumeHandle: &volumeHandle,
},
},
- // The status will be set by VolumeSnapshotContent reconciler
- // in the CSI snapshotter sidecar.
+			// The status will be set in a separate patch request by the
+			// common snapshot controller, using the information from
+			// the VolumeGroupSnapshotContent object.
}
if pv != nil {
@@ -682,6 +683,21 @@ func (ctrl *csiSnapshotCommonController) createSnapshotsForGroupSnapshotContent(
Path: "/status/volumeGroupSnapshotHandle",
Value: groupSnapshotContent.Status.VolumeGroupSnapshotHandle,
},
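+			// Propagate the per-snapshot creationTime, restoreSize and
+			// readyToUse values reported in the VolumeGroupSnapshotContent
+			// status to the member snapshot content's status subresource.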
+ {
+ Op: "replace",
+ Path: "/status/creationTime",
+ Value: snapshot.CreationTime,
+ },
+ {
+ Op: "replace",
+ Path: "/status/restoreSize",
+ Value: snapshot.RestoreSize,
+ },
+ {
+ Op: "replace",
+ Path: "/status/readyToUse",
+ Value: snapshot.ReadyToUse,
+ },
}, ctrl.clientset, "status")
if err != nil {
return groupSnapshotContent, fmt.Errorf(
@@ -749,7 +765,7 @@ func getSnapshotContentNameForVolumeGroupSnapshotContent(groupSnapshotUUID, volu
// VolumeGroupSnapshot, it updates the status of the group snapshot with an event
// and returns an error.
// Otherwise, the found group snapshot content will be returned.
-func (ctrl *csiSnapshotCommonController) getPreprovisionedGroupSnapshotContentFromStore(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) getPreprovisionedGroupSnapshotContentFromStore(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
contentName := *groupSnapshot.Spec.Source.VolumeGroupSnapshotContentName
if contentName == "" {
return nil, fmt.Errorf("empty VolumeGroupSnapshotContentName for group snapshot %s", utils.GroupSnapshotKey(groupSnapshot))
@@ -786,7 +802,7 @@ func (ctrl *csiSnapshotCommonController) getPreprovisionedGroupSnapshotContentFr
// the group snapshot content with the group snapshot. This is for static binding where
// user has specified group snapshot name but not UID of the group snapshot in
// groupSnapshotContent.Spec.VolumeGroupSnapshotRef.
-func (ctrl *csiSnapshotCommonController) checkAndBindGroupSnapshotContent(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) checkAndBindGroupSnapshotContent(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
if groupSnapshotContent.Spec.VolumeGroupSnapshotRef.Name != groupSnapshot.Name {
return nil, fmt.Errorf("Could not bind group snapshot %s and group snapshot content %s, the VolumeGroupSnapshotRef does not match", groupSnapshot.Name, groupSnapshotContent.Name)
} else if groupSnapshotContent.Spec.VolumeGroupSnapshotRef.UID != "" && groupSnapshotContent.Spec.VolumeGroupSnapshotRef.UID != groupSnapshot.UID {
@@ -826,7 +842,7 @@ func (ctrl *csiSnapshotCommonController) checkAndBindGroupSnapshotContent(groupS
}
// updateGroupSnapshotStatus updates group snapshot status based on group snapshot content status
-func (ctrl *csiSnapshotCommonController) updateGroupSnapshotStatus(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshot, error) {
+func (ctrl *csiSnapshotCommonController) updateGroupSnapshotStatus(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshot, error) {
klog.V(5).Infof("updateGroupSnapshotStatus[%s]", utils.GroupSnapshotKey(groupSnapshot))
boundContentName := groupSnapshotContent.Name
@@ -845,15 +861,15 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotStatus(groupSnapshot
klog.V(5).Infof("updateGroupSnapshotStatus: updating VolumeGroupSnapshot [%+v] based on VolumeGroupSnapshotContentStatus [%+v]", groupSnapshot, groupSnapshotContent.Status)
- groupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshot.Namespace).Get(context.TODO(), groupSnapshot.Name, metav1.GetOptions{})
+ groupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshot.Namespace).Get(context.TODO(), groupSnapshot.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error get group snapshot %s from api server: %v", utils.GroupSnapshotKey(groupSnapshot), err)
}
- var newStatus *crdv1beta1.VolumeGroupSnapshotStatus
+ var newStatus *crdv1beta2.VolumeGroupSnapshotStatus
updated := false
if groupSnapshotObj.Status == nil {
- newStatus = &crdv1beta1.VolumeGroupSnapshotStatus{
+ newStatus = &crdv1beta2.VolumeGroupSnapshotStatus{
BoundVolumeGroupSnapshotContentName: &boundContentName,
ReadyToUse: &readyToUse,
}
@@ -918,7 +934,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotStatus(groupSnapshot
ctrl.metricsManager.RecordMetrics(createAndReadyOperation, metrics.NewSnapshotOperationStatus(metrics.SnapshotStatusTypeSuccess), driverName)
}
- newGroupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshotClone.Namespace).UpdateStatus(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
+ newGroupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshotClone.Namespace).UpdateStatus(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
if err != nil {
return nil, newControllerUpdateError(utils.GroupSnapshotKey(groupSnapshot), err.Error())
}
@@ -942,7 +958,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotStatus(groupSnapshot
// If a group snapshot content is found but it does not point to the passed in VolumeGroupSnapshot,
// the passed in group snapshot will be updated with an error along with an event,
// and an error will be returned.
-func (ctrl *csiSnapshotCommonController) getDynamicallyProvisionedGroupContentFromStore(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) getDynamicallyProvisionedGroupContentFromStore(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
contentName := utils.GetDynamicSnapshotContentNameForGroupSnapshot(groupSnapshot)
groupSnapshotContent, err := ctrl.getGroupSnapshotContentFromStore(contentName)
if err != nil {
@@ -974,9 +990,9 @@ func (ctrl *csiSnapshotCommonController) getDynamicallyProvisionedGroupContentFr
}
// This routine sets snapshot.Spec.Source.VolumeGroupSnapshotContentName
-func (ctrl *csiSnapshotCommonController) bindandUpdateVolumeGroupSnapshot(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent, groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshot, error) {
+func (ctrl *csiSnapshotCommonController) bindandUpdateVolumeGroupSnapshot(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent, groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshot, error) {
klog.V(5).Infof("bindandUpdateVolumeGroupSnapshot for group snapshot [%s]: groupSnapshotContent [%s]", groupSnapshot.Name, groupSnapshotContent.Name)
- groupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshot.Namespace).Get(context.TODO(), groupSnapshot.Name, metav1.GetOptions{})
+ groupSnapshotObj, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshot.Namespace).Get(context.TODO(), groupSnapshot.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error get group snapshot %s from api server: %v", utils.GroupSnapshotKey(groupSnapshot), err)
}
@@ -984,7 +1000,7 @@ func (ctrl *csiSnapshotCommonController) bindandUpdateVolumeGroupSnapshot(groupS
// Copy the group snapshot object before updating it
groupSnapshotCopy := groupSnapshotObj.DeepCopy()
// update group snapshot status
- var updateGroupSnapshot *crdv1beta1.VolumeGroupSnapshot
+ var updateGroupSnapshot *crdv1beta2.VolumeGroupSnapshot
klog.V(5).Infof("bindandUpdateVolumeGroupSnapshot [%s]: trying to update group snapshot status", utils.GroupSnapshotKey(groupSnapshotCopy))
updateGroupSnapshot, err = ctrl.updateGroupSnapshotStatus(groupSnapshotCopy, groupSnapshotContent)
if err == nil {
@@ -1007,7 +1023,7 @@ func (ctrl *csiSnapshotCommonController) bindandUpdateVolumeGroupSnapshot(groupS
}
// createGroupSnapshotContent will only be called for dynamic provisioning
-func (ctrl *csiSnapshotCommonController) createGroupSnapshotContent(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) createGroupSnapshotContent(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
klog.Infof("createGroupSnapshotContent: Creating group snapshot content for group snapshot %s through the plugin ...", utils.GroupSnapshotKey(groupSnapshot))
/*
@@ -1058,13 +1074,13 @@ func (ctrl *csiSnapshotCommonController) createGroupSnapshotContent(groupSnapsho
volumeHandles = append(volumeHandles, pv.Spec.CSI.VolumeHandle)
}
- groupSnapshotContent := &crdv1beta1.VolumeGroupSnapshotContent{
+ groupSnapshotContent := &crdv1beta2.VolumeGroupSnapshotContent{
ObjectMeta: metav1.ObjectMeta{
Name: contentName,
},
- Spec: crdv1beta1.VolumeGroupSnapshotContentSpec{
+ Spec: crdv1beta2.VolumeGroupSnapshotContentSpec{
VolumeGroupSnapshotRef: *snapshotRef,
- Source: crdv1beta1.VolumeGroupSnapshotContentSource{
+ Source: crdv1beta2.VolumeGroupSnapshotContentSource{
VolumeHandles: volumeHandles,
},
VolumeGroupSnapshotClassName: &(groupSnapshotClass.Name),
@@ -1084,11 +1100,11 @@ func (ctrl *csiSnapshotCommonController) createGroupSnapshotContent(groupSnapsho
metav1.SetMetaDataAnnotation(&groupSnapshotContent.ObjectMeta, utils.AnnDeletionGroupSecretRefNamespace, snapshotterSecretRef.Namespace)
}
- var updateGroupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent
+ var updateGroupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent
klog.V(5).Infof("volume group snapshot content %#v", groupSnapshotContent)
// Try to create the VolumeGroupSnapshotContent object
klog.V(5).Infof("createGroupSnapshotContent [%s]: trying to save volume group snapshot content %s", utils.GroupSnapshotKey(groupSnapshot), groupSnapshotContent.Name)
- if updateGroupSnapshotContent, err = ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().Create(context.TODO(), groupSnapshotContent, metav1.CreateOptions{}); err == nil || apierrs.IsAlreadyExists(err) {
+ if updateGroupSnapshotContent, err = ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Create(context.TODO(), groupSnapshotContent, metav1.CreateOptions{}); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
klog.V(3).Infof("volume group snapshot content %q for group snapshot %q already exists, reusing", groupSnapshotContent.Name, utils.GroupSnapshotKey(groupSnapshot))
@@ -1118,10 +1134,10 @@ func (ctrl *csiSnapshotCommonController) createGroupSnapshotContent(groupSnapsho
return updateGroupSnapshotContent, nil
}
-func (ctrl *csiSnapshotCommonController) getCreateGroupSnapshotInput(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshotClass, []*v1.PersistentVolume, string, *v1.SecretReference, error) {
+func (ctrl *csiSnapshotCommonController) getCreateGroupSnapshotInput(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshotClass, []*v1.PersistentVolume, string, *v1.SecretReference, error) {
className := groupSnapshot.Spec.VolumeGroupSnapshotClassName
klog.V(5).Infof("getCreateGroupSnapshotInput [%s]", groupSnapshot.Name)
- var groupSnapshotClass *crdv1beta1.VolumeGroupSnapshotClass
+ var groupSnapshotClass *crdv1beta2.VolumeGroupSnapshotClass
var err error
if className != nil {
groupSnapshotClass, err = ctrl.getGroupSnapshotClass(*className)
@@ -1153,7 +1169,7 @@ func (ctrl *csiSnapshotCommonController) getCreateGroupSnapshotInput(groupSnapsh
}
// syncGroupSnapshotContent deals with one key off the queue
-func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContent(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContent(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
groupSnapshotName := utils.GroupSnapshotRefKey(&groupSnapshotContent.Spec.VolumeGroupSnapshotRef)
klog.V(4).Infof("synchronizing VolumeGroupSnapshotContent[%s]: group snapshot content is bound to group snapshot %s", groupSnapshotContent.Name, groupSnapshotName)
@@ -1185,7 +1201,7 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContent(groupSnapshotC
// If getGroupSnapshotFromStore returns (nil, nil), it means group snapshot not found
// and it may have already been deleted, and it will fall into the
// group snapshot == nil case below
- var groupSnapshot *crdv1beta1.VolumeGroupSnapshot
+ var groupSnapshot *crdv1beta2.VolumeGroupSnapshot
groupSnapshot, err := ctrl.getGroupSnapshotFromStore(groupSnapshotName)
if err != nil {
return err
@@ -1214,9 +1230,9 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContent(groupSnapshotC
// getGroupSnapshotFromStore finds group snapshot from the cache store.
// If getGroupSnapshotFromStore returns (nil, nil), it means group snapshot not
// found and it may have already been deleted.
-func (ctrl *csiSnapshotCommonController) getGroupSnapshotFromStore(groupSnapshotName string) (*crdv1beta1.VolumeGroupSnapshot, error) {
+func (ctrl *csiSnapshotCommonController) getGroupSnapshotFromStore(groupSnapshotName string) (*crdv1beta2.VolumeGroupSnapshot, error) {
// Get the VolumeGroupSnapshot by _name_
- var groupSnapshot *crdv1beta1.VolumeGroupSnapshot
+ var groupSnapshot *crdv1beta2.VolumeGroupSnapshot
obj, found, err := ctrl.groupSnapshotStore.GetByKey(groupSnapshotName)
if err != nil {
return nil, err
@@ -1227,7 +1243,7 @@ func (ctrl *csiSnapshotCommonController) getGroupSnapshotFromStore(groupSnapshot
return nil, nil
}
var ok bool
- groupSnapshot, ok = obj.(*crdv1beta1.VolumeGroupSnapshot)
+ groupSnapshot, ok = obj.(*crdv1beta2.VolumeGroupSnapshot)
if !ok {
return nil, fmt.Errorf("cannot convert object from group snapshot cache to group snapshot %q!?: %#v", groupSnapshotName, obj)
}
@@ -1239,7 +1255,7 @@ func (ctrl *csiSnapshotCommonController) getGroupSnapshotFromStore(groupSnapshot
// needsUpdateGroupSnapshotStatus compares group snapshot status with the group snapshot content
// status and decides if group snapshot status needs to be updated based on group snapshot content
// status
-func (ctrl *csiSnapshotCommonController) needsUpdateGroupSnapshotStatus(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) bool {
+func (ctrl *csiSnapshotCommonController) needsUpdateGroupSnapshotStatus(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) bool {
klog.V(5).Infof("needsUpdateGroupSnapshotStatus[%s]", utils.GroupSnapshotKey(groupSnapshot))
if groupSnapshot.Status == nil && groupSnapshotContent.Status != nil {
@@ -1265,7 +1281,7 @@ func (ctrl *csiSnapshotCommonController) needsUpdateGroupSnapshotStatus(groupSna
}
// addGroupSnapshotContentFinalizer adds a Finalizer for VolumeGroupSnapshotContent.
-func (ctrl *csiSnapshotCommonController) addGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotCommonController) addGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
var patches []utils.PatchOp
if len(groupSnapshotContent.Finalizers) > 0 {
// Add to the end of the finalizers if we have any other finalizers
@@ -1297,10 +1313,10 @@ func (ctrl *csiSnapshotCommonController) addGroupSnapshotContentFinalizer(groupS
}
// checkandAddGroupSnapshotFinalizers checks and adds group snapshot finalizers when needed
-func (ctrl *csiSnapshotCommonController) checkandAddGroupSnapshotFinalizers(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) checkandAddGroupSnapshotFinalizers(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
// get the group snapshot content for this group snapshot
var (
- groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent
+ groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent
err error
)
if groupSnapshot.Spec.Source.VolumeGroupSnapshotContentName != nil {
@@ -1326,8 +1342,8 @@ func (ctrl *csiSnapshotCommonController) checkandAddGroupSnapshotFinalizers(grou
}
// addGroupSnapshotFinalizer adds a Finalizer to a VolumeGroupSnapshot.
-func (ctrl *csiSnapshotCommonController) addGroupSnapshotFinalizer(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, addBoundFinalizer bool) error {
- var updatedGroupSnapshot *crdv1beta1.VolumeGroupSnapshot
+func (ctrl *csiSnapshotCommonController) addGroupSnapshotFinalizer(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, addBoundFinalizer bool) error {
+ var updatedGroupSnapshot *crdv1beta2.VolumeGroupSnapshot
var err error
// NOTE(ggriffiths): Must perform an update if no finalizers exist.
@@ -1337,7 +1353,7 @@ func (ctrl *csiSnapshotCommonController) addGroupSnapshotFinalizer(groupSnapshot
if addBoundFinalizer {
groupSnapshotClone.ObjectMeta.Finalizers = append(groupSnapshotClone.ObjectMeta.Finalizers, utils.VolumeGroupSnapshotBoundFinalizer)
}
- updatedGroupSnapshot, err = ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
+ updatedGroupSnapshot, err = ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
if err != nil {
return newControllerUpdateError(utils.GroupSnapshotKey(groupSnapshot), err.Error())
}
@@ -1376,7 +1392,7 @@ func (ctrl *csiSnapshotCommonController) addGroupSnapshotFinalizer(groupSnapshot
// with information obtained from step 1. This function name is long, but it
// says what it does: it determines whether to remove finalizers on the group
// snapshot and whether to delete the group snapshot content.
-func (ctrl *csiSnapshotCommonController) processGroupSnapshotWithDeletionTimestamp(ctx context.Context, groupSnapshot *crdv1beta1.VolumeGroupSnapshot) error {
+func (ctrl *csiSnapshotCommonController) processGroupSnapshotWithDeletionTimestamp(ctx context.Context, groupSnapshot *crdv1beta2.VolumeGroupSnapshot) error {
klog.V(5).Infof("processGroupSnapshotWithDeletionTimestamp VolumeGroupSnapshot[%s]: %s", utils.GroupSnapshotKey(groupSnapshot), utils.GetGroupSnapshotStatusForLogging(groupSnapshot))
driverName, err := ctrl.getGroupSnapshotDriverName(groupSnapshot)
@@ -1491,7 +1507,7 @@ func (ctrl *csiSnapshotCommonController) processGroupSnapshotWithDeletionTimesta
// VolumeGroupSnapshotContent won't be deleted immediately due to the VolumeGroupSnapshotContentFinalizer
if groupSnapshotContent != nil && deleteGroupSnapshotContent {
klog.V(5).Infof("processGroupSnapshotWithDeletionTimestamp[%s]: set DeletionTimeStamp on group snapshot content [%s].", utils.GroupSnapshotKey(groupSnapshot), groupSnapshotContent.Name)
- err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().Delete(ctx, groupSnapshotContent.Name, metav1.DeleteOptions{})
+ err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Delete(ctx, groupSnapshotContent.Name, metav1.DeleteOptions{})
if err != nil {
ctrl.eventRecorder.Event(groupSnapshot, v1.EventTypeWarning, "GroupSnapshotContentObjectDeleteError", "Failed to delete group snapshot content API object")
return fmt.Errorf("failed to delete VolumeGroupSnapshotContent %s from API server: %q", groupSnapshotContent.Name, err)
@@ -1530,7 +1546,7 @@ func (ctrl *csiSnapshotCommonController) processGroupSnapshotWithDeletionTimesta
return ctrl.removeGroupSnapshotFinalizer(groupSnapshot, removeBoundFinalizer)
}
-func (ctrl *csiSnapshotCommonController) setAnnVolumeGroupSnapshotBeingDeleted(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotCommonController) setAnnVolumeGroupSnapshotBeingDeleted(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
if groupSnapshotContent == nil {
return groupSnapshotContent, nil
}
@@ -1590,7 +1606,7 @@ func (ctrl *csiSnapshotCommonController) findGroupSnapshotMembers(groupSnapshotN
}
// removeGroupSnapshotFinalizer removes a Finalizer for VolumeGroupSnapshot.
-func (ctrl *csiSnapshotCommonController) removeGroupSnapshotFinalizer(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, removeBoundFinalizer bool) error {
+func (ctrl *csiSnapshotCommonController) removeGroupSnapshotFinalizer(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, removeBoundFinalizer bool) error {
if !removeBoundFinalizer {
return nil
}
@@ -1599,7 +1615,7 @@ func (ctrl *csiSnapshotCommonController) removeGroupSnapshotFinalizer(groupSnaps
groupSnapshotClone := groupSnapshot.DeepCopy()
groupSnapshotClone.ObjectMeta.Finalizers = utils.RemoveString(groupSnapshotClone.ObjectMeta.Finalizers, utils.VolumeGroupSnapshotBoundFinalizer)
- newGroupSnapshot, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
+ newGroupSnapshot, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshots(groupSnapshotClone.Namespace).Update(context.TODO(), groupSnapshotClone, metav1.UpdateOptions{})
if err != nil {
return newControllerUpdateError(groupSnapshot.Name, err.Error())
}
@@ -1615,7 +1631,7 @@ func (ctrl *csiSnapshotCommonController) removeGroupSnapshotFinalizer(groupSnaps
// getGroupSnapshotDriverName is a helper function to get the driver name from the VolumeGroupSnapshot.
// We try to get the driverName in multiple ways, as snapshot controller metrics depend on the correct driverName.
-func (ctrl *csiSnapshotCommonController) getGroupSnapshotDriverName(vgs *crdv1beta1.VolumeGroupSnapshot) (string, error) {
+func (ctrl *csiSnapshotCommonController) getGroupSnapshotDriverName(vgs *crdv1beta2.VolumeGroupSnapshot) (string, error) {
klog.V(5).Infof("getGroupSnapshotDriverName: VolumeGroupSnapshot[%s]", vgs.Name)
var driverName string
diff --git a/pkg/common-controller/groupsnapshot_create_test.go b/pkg/common-controller/groupsnapshot_create_test.go
index 8e3e2b322..33f94bd62 100644
--- a/pkg/common-controller/groupsnapshot_create_test.go
+++ b/pkg/common-controller/groupsnapshot_create_test.go
@@ -19,14 +19,14 @@ package common_controller
import (
"testing"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var groupSnapshotClasses = []*crdv1beta1.VolumeGroupSnapshotClass{
+var groupSnapshotClasses = []*crdv1beta2.VolumeGroupSnapshotClass{
{
TypeMeta: metav1.TypeMeta{
Kind: "VolumeGroupSnapshotClass",
diff --git a/pkg/common-controller/snapshot_controller.go b/pkg/common-controller/snapshot_controller.go
index 732e617d5..67fdc6c5e 100644
--- a/pkg/common-controller/snapshot_controller.go
+++ b/pkg/common-controller/snapshot_controller.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"slices"
- "strings"
"time"
v1 "k8s.io/api/core/v1"
@@ -1558,15 +1557,6 @@ func (e controllerUpdateError) Error() string {
return e.message
}
-func isControllerUpdateFailError(err *crdv1.VolumeSnapshotError) bool {
- if err != nil {
- if strings.Contains(*err.Message, controllerUpdateFailMsg) {
- return true
- }
- }
- return false
-}
-
// addSnapshotFinalizer adds a Finalizer for VolumeSnapshot.
func (ctrl *csiSnapshotCommonController) addSnapshotFinalizer(snapshot *crdv1.VolumeSnapshot, addSourceFinalizer bool, addBoundFinalizer bool) error {
var updatedSnapshot *crdv1.VolumeSnapshot
diff --git a/pkg/common-controller/snapshot_controller_base.go b/pkg/common-controller/snapshot_controller_base.go
index 8f88bb2d4..fcd65b3ae 100644
--- a/pkg/common-controller/snapshot_controller_base.go
+++ b/pkg/common-controller/snapshot_controller_base.go
@@ -19,15 +19,17 @@ package common_controller
import (
"context"
"fmt"
+ "sync"
"time"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
- groupsnapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta1"
+ groupsnapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta2"
snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumesnapshot/v1"
- groupsnapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta1"
+ groupsnapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
snapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumesnapshot/v1"
+ "github.com/kubernetes-csi/external-snapshotter/v8/pkg/features"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/metrics"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
@@ -35,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@@ -244,7 +247,7 @@ func NewCSISnapshotCommonController(
return ctrl
}
-func (ctrl *csiSnapshotCommonController) Run(workers int, stopCh <-chan struct{}) {
+func (ctrl *csiSnapshotCommonController) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup) {
defer ctrl.snapshotQueue.ShutDown()
defer ctrl.contentQueue.ShutDown()
if ctrl.enableVolumeGroupSnapshots {
@@ -276,12 +279,40 @@ func (ctrl *csiSnapshotCommonController) Run(workers int, stopCh <-chan struct{}
ctrl.initializeCaches()
- for i := 0; i < workers; i++ {
- go wait.Until(ctrl.snapshotWorker, 0, stopCh)
- go wait.Until(ctrl.contentWorker, 0, stopCh)
- if ctrl.enableVolumeGroupSnapshots {
- go wait.Until(ctrl.groupSnapshotWorker, 0, stopCh)
- go wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ for i := 0; i < workers; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.snapshotWorker, 0, stopCh)
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.contentWorker, 0, stopCh)
+ }()
+
+ if ctrl.enableVolumeGroupSnapshots {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.groupSnapshotWorker, 0, stopCh)
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ }()
+ }
+ }
+ } else {
+ for i := 0; i < workers; i++ {
+ go wait.Until(ctrl.snapshotWorker, 0, stopCh)
+ go wait.Until(ctrl.contentWorker, 0, stopCh)
+ if ctrl.enableVolumeGroupSnapshots {
+ go wait.Until(ctrl.groupSnapshotWorker, 0, stopCh)
+ go wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ }
}
}
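
Editor's note: the `Run` signature change above threads a `*sync.WaitGroup` through to every worker goroutine so that, when the `ReleaseLeaderElectionOnExit` gate (defined later in this diff) is enabled, the caller can block until all workers have drained before giving up the leader-election lease; the sidecar controller's `Run` receives the same treatment further down. A minimal caller sketch, assuming a typical signal-handling main loop — the names `sigCh`/`stopCh` and the commented-out `ctrl.Run` call are illustrative, not the actual `cmd/` wiring:

```go
package main

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
)

func main() {
	stopCh := make(chan struct{})
	var wg sync.WaitGroup

	// ctrl.Run(workerThreads, stopCh, &wg) would be started here; with the
	// gate enabled, each worker does wg.Add(1) / defer wg.Done() as in the
	// hunk above.

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
	<-sigCh       // SIGTERM / SIGINT received
	close(stopCh) // ask the wait.Until loops to stop
	wg.Wait()     // all workers have returned; safe to release the lease
}
```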
@@ -658,7 +689,7 @@ func (ctrl *csiSnapshotCommonController) enqueueGroupSnapshotWork(obj interface{
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
- if groupSnapshot, ok := obj.(*crdv1beta1.VolumeGroupSnapshot); ok {
+ if groupSnapshot, ok := obj.(*crdv1beta2.VolumeGroupSnapshot); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(groupSnapshot)
if err != nil {
klog.Errorf("failed to get key from object: %v, %v", err, groupSnapshot)
@@ -675,7 +706,7 @@ func (ctrl *csiSnapshotCommonController) enqueueGroupSnapshotContentWork(obj int
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
- if content, ok := obj.(*crdv1beta1.VolumeGroupSnapshotContent); ok {
+ if content, ok := obj.(*crdv1beta2.VolumeGroupSnapshotContent); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(content)
if err != nil {
klog.Errorf("failed to get key from object: %v, %v", err, content)
@@ -769,7 +800,7 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotByKey(ctx context.Cont
klog.V(2).Infof("deletion of group snapshot %q was already processed", key)
return nil
}
- groupSnapshot, ok := vgsObj.(*crdv1beta1.VolumeGroupSnapshot)
+ groupSnapshot, ok := vgsObj.(*crdv1beta2.VolumeGroupSnapshot)
if !ok {
klog.Errorf("expected vgs, got %+v", vgsObj)
return nil
@@ -785,9 +816,9 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotByKey(ctx context.Cont
// If it is not set, gets it from default VolumeGroupSnapshotClass and sets it.
// On error, it must return the original group snapshot, not nil, because the caller
// syncGroupSnapshotByKey needs to check the group snapshot's timestamp.
-func (ctrl *csiSnapshotCommonController) checkAndUpdateGroupSnapshotClass(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*crdv1beta1.VolumeGroupSnapshot, error) {
+func (ctrl *csiSnapshotCommonController) checkAndUpdateGroupSnapshotClass(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*crdv1beta2.VolumeGroupSnapshot, error) {
className := groupSnapshot.Spec.VolumeGroupSnapshotClassName
- var class *crdv1beta1.VolumeGroupSnapshotClass
+ var class *crdv1beta2.VolumeGroupSnapshotClass
var err error
newGroupSnapshot := groupSnapshot
if className != nil {
@@ -849,7 +880,7 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContentByKey(key strin
klog.V(2).Infof("deletion of group snapshot content %q was already processed", key)
return nil
}
- content, ok := contentObj.(*crdv1beta1.VolumeGroupSnapshotContent)
+ content, ok := contentObj.(*crdv1beta2.VolumeGroupSnapshotContent)
if !ok {
klog.Errorf("expected group snapshot content, got %+v", content)
return nil
@@ -860,7 +891,7 @@ func (ctrl *csiSnapshotCommonController) syncGroupSnapshotContentByKey(key strin
// updateGroupSnapshotContent runs in a worker thread and handles "groupsnapshotcontent added",
// "groupsnapshotcontent updated" and "periodic sync" events.
-func (ctrl *csiSnapshotCommonController) updateGroupSnapshotContent(content *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotCommonController) updateGroupSnapshotContent(content *crdv1beta2.VolumeGroupSnapshotContent) error {
// Store the new group snapshot content version in the cache and do not process
// it if this is an old version.
new, err := ctrl.storeGroupSnapshotContentUpdate(content)
@@ -885,7 +916,7 @@ func (ctrl *csiSnapshotCommonController) updateGroupSnapshotContent(content *crd
}
// deleteGroupSnapshotContent runs in a worker thread and handles the "groupsnapshotcontent deleted" event.
-func (ctrl *csiSnapshotCommonController) deleteGroupSnapshotContent(content *crdv1beta1.VolumeGroupSnapshotContent) {
+func (ctrl *csiSnapshotCommonController) deleteGroupSnapshotContent(content *crdv1beta2.VolumeGroupSnapshotContent) {
_ = ctrl.groupSnapshotContentStore.Delete(content)
klog.V(4).Infof("group snapshot content %q deleted", content.Name)
diff --git a/pkg/features/features.go b/pkg/features/features.go
index c35b515ec..803581b96 100644
--- a/pkg/features/features.go
+++ b/pkg/features/features.go
@@ -24,6 +24,12 @@ import (
const (
// Enable usage of volume group snapshot
VolumeGroupSnapshot featuregate.Feature = "CSIVolumeGroupSnapshot"
+
+ // owner: @rhrmo
+ // alpha: v1.34
+ //
+ // Releases leader election lease on sigterm / sigint.
+ ReleaseLeaderElectionOnExit featuregate.Feature = "ReleaseLeaderElectionOnExit"
)
func init() {
@@ -33,5 +39,6 @@ func init() {
// defaultKubernetesFeatureGates consists of all known feature keys specific to external-snapshotter.
// To add a new feature, define a key for it above and add it here.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
- VolumeGroupSnapshot: {Default: false, PreRelease: featuregate.Beta},
+ VolumeGroupSnapshot: {Default: false, PreRelease: featuregate.Beta},
+ ReleaseLeaderElectionOnExit: {Default: false, PreRelease: featuregate.Alpha},
}
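
Editor's note: the new gate is consulted through the standard feature-gate machinery, exactly as in the `Run` changes above. A small sketch, assuming the `init` in this file registers the gate with `utilfeature.DefaultMutableFeatureGate`; the programmatic `Set` call stands in for the `--feature-gates=ReleaseLeaderElectionOnExit=true` command-line flag a deployment would normally use:

```go
package main

import (
	"fmt"

	"github.com/kubernetes-csi/external-snapshotter/v8/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Alpha gates default to off; enable it explicitly for this demo.
	if err := utilfeature.DefaultMutableFeatureGate.Set("ReleaseLeaderElectionOnExit=true"); err != nil {
		panic(err)
	}

	if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
		fmt.Println("workers will be tracked and the lease released on exit")
	}
}
```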
diff --git a/pkg/sidecar-controller/csi_handler.go b/pkg/sidecar-controller/csi_handler.go
index 678f4d029..9658e3179 100644
--- a/pkg/sidecar-controller/csi_handler.go
+++ b/pkg/sidecar-controller/csi_handler.go
@@ -23,7 +23,7 @@ import (
"time"
"github.com/container-storage-interface/spec/lib/go/csi"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/group_snapshotter"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
@@ -35,9 +35,9 @@ type Handler interface {
CreateSnapshot(content *crdv1.VolumeSnapshotContent, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, time.Time, int64, bool, error)
DeleteSnapshot(content *crdv1.VolumeSnapshotContent, snapshotterCredentials map[string]string) error
GetSnapshotStatus(content *crdv1.VolumeSnapshotContent, snapshotterListCredentials map[string]string) (bool, time.Time, int64, string, error)
- CreateGroupSnapshot(content *crdv1beta1.VolumeGroupSnapshotContent, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, []*csi.Snapshot, time.Time, bool, error)
- GetGroupSnapshotStatus(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) (bool, time.Time, error)
- DeleteGroupSnapshot(content *crdv1beta1.VolumeGroupSnapshotContent, SnapshotID []string, snapshotterCredentials map[string]string) error
+ CreateGroupSnapshot(content *crdv1beta2.VolumeGroupSnapshotContent, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, []*csi.Snapshot, time.Time, bool, error)
+ GetGroupSnapshotStatus(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) (bool, time.Time, error)
+ DeleteGroupSnapshot(content *crdv1beta2.VolumeGroupSnapshotContent, SnapshotID []string, snapshotterCredentials map[string]string) error
}
// csiHandler is a handler that calls CSI to create/delete volume snapshot.
@@ -148,7 +148,7 @@ func makeSnapshotName(prefix, snapshotUID string, snapshotNameUUIDLength int) (s
return fmt.Sprintf("%s-%s", prefix, strings.Replace(snapshotUID, "-", "", -1)[0:snapshotNameUUIDLength]), nil
}
-func (handler *csiHandler) CreateGroupSnapshot(content *crdv1beta1.VolumeGroupSnapshotContent, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, []*csi.Snapshot, time.Time, bool, error) {
+func (handler *csiHandler) CreateGroupSnapshot(content *crdv1beta2.VolumeGroupSnapshotContent, parameters map[string]string, snapshotterCredentials map[string]string) (string, string, []*csi.Snapshot, time.Time, bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), handler.timeout)
defer cancel()
@@ -167,7 +167,7 @@ func (handler *csiHandler) CreateGroupSnapshot(content *crdv1beta1.VolumeGroupSn
return handler.groupSnapshotter.CreateGroupSnapshot(ctx, groupSnapshotName, content.Spec.Source.VolumeHandles, parameters, snapshotterCredentials)
}
-func (handler *csiHandler) DeleteGroupSnapshot(content *crdv1beta1.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) error {
+func (handler *csiHandler) DeleteGroupSnapshot(content *crdv1beta2.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) error {
ctx, cancel := context.WithTimeout(context.Background(), handler.timeout)
defer cancel()
@@ -188,7 +188,7 @@ func (handler *csiHandler) DeleteGroupSnapshot(content *crdv1beta1.VolumeGroupSn
return handler.groupSnapshotter.DeleteGroupSnapshot(ctx, groupSnapshotHandle, snapshotIDs, snapshotterCredentials)
}
-func (handler *csiHandler) GetGroupSnapshotStatus(content *crdv1beta1.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) (bool, time.Time, error) {
+func (handler *csiHandler) GetGroupSnapshotStatus(content *crdv1beta2.VolumeGroupSnapshotContent, snapshotIDs []string, snapshotterCredentials map[string]string) (bool, time.Time, error) {
ctx, cancel := context.WithTimeout(context.Background(), handler.timeout)
defer cancel()
diff --git a/pkg/sidecar-controller/framework_test.go b/pkg/sidecar-controller/framework_test.go
index 9265a3686..a8bf8b74a 100644
--- a/pkg/sidecar-controller/framework_test.go
+++ b/pkg/sidecar-controller/framework_test.go
@@ -27,6 +27,7 @@ import (
"time"
jsonpatch "github.com/evanphx/json-patch"
+ "github.com/google/go-cmp/cmp"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
"github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake"
@@ -39,7 +40,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
@@ -359,7 +359,7 @@ func (r *snapshotReactor) checkContents(expectedContents []*crdv1.VolumeSnapshot
if !reflect.DeepEqual(expectedMap, gotMap) {
// Print ugly but useful diff of expected and received objects for
// easier debugging.
- return fmt.Errorf("content check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
+ return fmt.Errorf("content check failed [A-expected, B-got]: %s", cmp.Diff(expectedMap, gotMap))
}
return nil
}
@@ -577,8 +577,8 @@ func newTestController(kubeClient kubernetes.Interface, clientset clientset.Inte
true,
workqueue.NewTypedItemExponentialFailureRateLimiter[string](1*time.Millisecond, 1*time.Minute),
false,
- informerFactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotContents(),
- informerFactory.Groupsnapshot().V1beta1().VolumeGroupSnapshotClasses(),
+ informerFactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotContents(),
+ informerFactory.Groupsnapshot().V1beta2().VolumeGroupSnapshotClasses(),
workqueue.NewTypedItemExponentialFailureRateLimiter[string](1*time.Millisecond, 1*time.Minute),
)
diff --git a/pkg/sidecar-controller/groupsnapshot_helper.go b/pkg/sidecar-controller/groupsnapshot_helper.go
index 34a6ed67c..70512ea83 100644
--- a/pkg/sidecar-controller/groupsnapshot_helper.go
+++ b/pkg/sidecar-controller/groupsnapshot_helper.go
@@ -24,13 +24,14 @@ import (
"strings"
"time"
+ "github.com/container-storage-interface/spec/lib/go/csi"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
klog "k8s.io/klog/v2"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
)
@@ -52,7 +53,7 @@ func (ctrl *csiSnapshotSideCarController) enqueueGroupSnapshotContentWork(obj in
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
- if groupSnapshotContent, ok := obj.(*crdv1beta1.VolumeGroupSnapshotContent); ok {
+ if groupSnapshotContent, ok := obj.(*crdv1beta2.VolumeGroupSnapshotContent); ok {
objName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(groupSnapshotContent)
if err != nil {
klog.Errorf("failed to get key from object: %v, %v", err, groupSnapshotContent)
@@ -125,7 +126,7 @@ func (ctrl *csiSnapshotSideCarController) syncGroupSnapshotContentByKey(key stri
klog.V(2).Infof("deletion of group snapshot content %q was already processed", key)
return nil
}
- groupSnapshotContent, ok := groupSnapshotContentObj.(*crdv1beta1.VolumeGroupSnapshotContent)
+ groupSnapshotContent, ok := groupSnapshotContentObj.(*crdv1beta2.VolumeGroupSnapshotContent)
if !ok {
klog.Errorf("expected group snapshot content, got %+v", groupSnapshotContent)
return nil
@@ -137,7 +138,7 @@ func (ctrl *csiSnapshotSideCarController) syncGroupSnapshotContentByKey(key stri
// updateGroupSnapshotContentInInformerCache runs in a worker thread and handles
// "group snapshot content added", "group snapshot content updated" and "periodic
// sync" events.
-func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentInInformerCache(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentInInformerCache(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
// Store the new group snapshot content version in the cache and do not process
// it if this is an old version.
new, err := ctrl.storeGroupSnapshotContentUpdate(groupSnapshotContent)
@@ -163,13 +164,13 @@ func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentInInformerCa
// deleteGroupSnapshotContentInCacheStore runs in a worker thread and handles the "group
// snapshot content deleted" event.
-func (ctrl *csiSnapshotSideCarController) deleteGroupSnapshotContentInCacheStore(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) {
+func (ctrl *csiSnapshotSideCarController) deleteGroupSnapshotContentInCacheStore(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) {
_ = ctrl.groupSnapshotContentStore.Delete(groupSnapshotContent)
klog.V(4).Infof("group snapshot content %q deleted", groupSnapshotContent.Name)
}
// syncGroupSnapshotContent deals with one group snapshot content object taken off the queue. It returns an error if processing fails.
-func (ctrl *csiSnapshotSideCarController) syncGroupSnapshotContent(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotSideCarController) syncGroupSnapshotContent(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
klog.V(5).Infof("synchronizing VolumeGroupSnapshotContent[%s]", groupSnapshotContent.Name)
if ctrl.shouldDeleteGroupSnapshotContent(groupSnapshotContent) {
@@ -209,7 +210,7 @@ func (ctrl *csiSnapshotSideCarController) syncGroupSnapshotContent(groupSnapshot
// removeGroupSnapshotContentFinalizer removes the VolumeGroupSnapshotContentFinalizer from a
// group snapshot content if one is present.
-func (ctrl csiSnapshotSideCarController) removeGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl csiSnapshotSideCarController) removeGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
if !slices.Contains(groupSnapshotContent.ObjectMeta.Finalizers, utils.VolumeGroupSnapshotContentFinalizer) {
// the finalizer does not exist, return directly
return nil
@@ -237,7 +238,7 @@ func (ctrl csiSnapshotSideCarController) removeGroupSnapshotContentFinalizer(gro
}
// Delete a group snapshot: ask the backend to remove the group snapshot from the storage system
-func (ctrl *csiSnapshotSideCarController) deleteCSIGroupSnapshotOperation(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotSideCarController) deleteCSIGroupSnapshotOperation(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
if groupSnapshotContent == nil {
return fmt.Errorf("groupSnapshotContent is nil")
}
@@ -254,8 +255,8 @@ func (ctrl *csiSnapshotSideCarController) deleteCSIGroupSnapshotOperation(groupS
// For static provisioning, they can be found in groupContent.Spec.Source.GroupSnapshotHandles.VolumeSnapshotHandles
var snapshotIDs []string
if groupSnapshotContent.Status != nil {
- if len(groupSnapshotContent.Status.VolumeSnapshotHandlePairList) != 0 {
- for _, contentRef := range groupSnapshotContent.Status.VolumeSnapshotHandlePairList {
+ if len(groupSnapshotContent.Status.VolumeSnapshotInfoList) != 0 {
+ for _, contentRef := range groupSnapshotContent.Status.VolumeSnapshotInfoList {
snapshotIDs = append(snapshotIDs, contentRef.SnapshotHandle)
}
} else if groupSnapshotContent.Spec.Source.GroupSnapshotHandles != nil {
@@ -286,10 +287,10 @@ func (ctrl *csiSnapshotSideCarController) deleteCSIGroupSnapshotOperation(groupS
// in groupSnapshotContent.Status. On success, the latest version of the group snapshot
// content object will be returned.
func (ctrl *csiSnapshotSideCarController) clearGroupSnapshotContentStatus(
- groupSnapshotContentName string) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+ groupSnapshotContentName string) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
klog.V(5).Infof("clearGroupSnapshotContentStatus content [%s]", groupSnapshotContentName)
// get the latest version from API server
- groupSnapshotContent, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().Get(context.TODO(), groupSnapshotContentName, metav1.GetOptions{})
+ groupSnapshotContent, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Get(context.TODO(), groupSnapshotContentName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error get group snapshot content %s from api server: %v", groupSnapshotContentName, err)
}
@@ -299,14 +300,14 @@ func (ctrl *csiSnapshotSideCarController) clearGroupSnapshotContentStatus(
groupSnapshotContent.Status.CreationTime = nil
groupSnapshotContent.Status.Error = nil
}
- newContent, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().UpdateStatus(context.TODO(), groupSnapshotContent, metav1.UpdateOptions{})
+ newContent, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().UpdateStatus(context.TODO(), groupSnapshotContent, metav1.UpdateOptions{})
if err != nil {
return groupSnapshotContent, newControllerUpdateError(groupSnapshotContentName, err.Error())
}
return newContent, nil
}
-func (ctrl *csiSnapshotSideCarController) GetCredentialsFromAnnotationForGroupSnapshot(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (map[string]string, error) {
+func (ctrl *csiSnapshotSideCarController) GetCredentialsFromAnnotationForGroupSnapshot(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (map[string]string, error) {
// get secrets if VolumeGroupSnapshotClass specifies it
var snapshotterCredentials map[string]string
var err error
@@ -338,7 +339,7 @@ func (ctrl *csiSnapshotSideCarController) GetCredentialsFromAnnotationForGroupSn
// shouldDeleteGroupSnapshotContent checks whether a groupSnapshotContent object whose
// DeletionTimestamp is set should be deleted
-func (ctrl *csiSnapshotSideCarController) shouldDeleteGroupSnapshotContent(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) bool {
+func (ctrl *csiSnapshotSideCarController) shouldDeleteGroupSnapshotContent(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) bool {
klog.V(5).Infof("Check if VolumeGroupSnapshotContent[%s] should be deleted.", groupSnapshotContent.Name)
if groupSnapshotContent.ObjectMeta.DeletionTimestamp == nil {
@@ -367,7 +368,7 @@ func (ctrl *csiSnapshotSideCarController) shouldDeleteGroupSnapshotContent(group
}
// createGroupSnapshot starts new asynchronous operation to create group snapshot
-func (ctrl *csiSnapshotSideCarController) createGroupSnapshot(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotSideCarController) createGroupSnapshot(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
klog.V(5).Infof("createGroupSnapshot for group snapshot content [%s]: started", groupSnapshotContent.Name)
groupSnapshotContentObj, err := ctrl.createGroupSnapshotWrapper(groupSnapshotContent)
if err != nil {
@@ -386,7 +387,7 @@ func (ctrl *csiSnapshotSideCarController) createGroupSnapshot(groupSnapshotConte
}
// This is a wrapper function for the group snapshot creation process.
-func (ctrl *csiSnapshotSideCarController) createGroupSnapshotWrapper(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotSideCarController) createGroupSnapshotWrapper(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
klog.Infof("createGroupSnapshotWrapper: Creating group snapshot for group snapshot content %s through the plugin ...", groupSnapshotContent.Name)
class, snapshotterCredentials, err := ctrl.getCSIGroupSnapshotInput(groupSnapshotContent)
@@ -439,15 +440,13 @@ func (ctrl *csiSnapshotSideCarController) createGroupSnapshotWrapper(groupSnapsh
}
// Create individual snapshots and snapshot contents
- var snapshotContentLinks []snapshotContentNameVolumeHandlePair
- for _, snapshot := range snapshots {
- snapshotContentLinks = append(snapshotContentLinks, snapshotContentNameVolumeHandlePair{
- snapshotHandle: snapshot.SnapshotId,
- volumeHandle: snapshot.SourceVolumeId,
- })
- }
-
- newGroupSnapshotContent, err := ctrl.updateGroupSnapshotContentStatus(groupSnapshotContent, groupSnapshotID, readyToUse, metav1.NewTime(creationTime), snapshotContentLinks)
+ newGroupSnapshotContent, err := ctrl.updateGroupSnapshotContentStatus(
+ groupSnapshotContent,
+ groupSnapshotID,
+ readyToUse,
+ metav1.NewTime(creationTime),
+ snapshots,
+ )
if err != nil {
klog.Errorf("error updating status for volume group snapshot content %s: %v.", groupSnapshotContent.Name, err)
return groupSnapshotContent, fmt.Errorf("error updating status for volume group snapshot content %s: %v", groupSnapshotContent.Name, err)
@@ -464,10 +463,10 @@ func (ctrl *csiSnapshotSideCarController) createGroupSnapshotWrapper(groupSnapsh
return groupSnapshotContent, nil
}
-func (ctrl *csiSnapshotSideCarController) getCSIGroupSnapshotInput(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotClass, map[string]string, error) {
+func (ctrl *csiSnapshotSideCarController) getCSIGroupSnapshotInput(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotClass, map[string]string, error) {
className := groupSnapshotContent.Spec.VolumeGroupSnapshotClassName
klog.V(5).Infof("getCSIGroupSnapshotInput for group snapshot content %s", groupSnapshotContent.Name)
- var class *crdv1beta1.VolumeGroupSnapshotClass
+ var class *crdv1beta2.VolumeGroupSnapshotClass
var err error
if className != nil {
class, err = ctrl.getGroupSnapshotClass(*className)
@@ -495,7 +494,7 @@ func (ctrl *csiSnapshotSideCarController) getCSIGroupSnapshotInput(groupSnapshot
}
// getGroupSnapshotClass is a helper function to get the group snapshot class from the class name.
-func (ctrl *csiSnapshotSideCarController) getGroupSnapshotClass(className string) (*crdv1beta1.VolumeGroupSnapshotClass, error) {
+func (ctrl *csiSnapshotSideCarController) getGroupSnapshotClass(className string) (*crdv1beta2.VolumeGroupSnapshotClass, error) {
klog.V(5).Infof("getGroupSnapshotClass: VolumeGroupSnapshotClassName [%s]", className)
class, err := ctrl.groupSnapshotClassLister.Get(className)
@@ -510,7 +509,7 @@ func (ctrl *csiSnapshotSideCarController) getGroupSnapshotClass(className string
// setAnnVolumeGroupSnapshotBeingCreated sets the VolumeGroupSnapshotBeingCreated annotation
// on a VolumeGroupSnapshotContent.
// If set, it indicates that the group snapshot is being created.
-func (ctrl *csiSnapshotSideCarController) setAnnVolumeGroupSnapshotBeingCreated(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotSideCarController) setAnnVolumeGroupSnapshotBeingCreated(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
if metav1.HasAnnotation(groupSnapshotContent.ObjectMeta, utils.AnnVolumeGroupSnapshotBeingCreated) {
// the annotation already exists, return directly
return groupSnapshotContent, nil
@@ -551,7 +550,7 @@ func (ctrl *csiSnapshotSideCarController) setAnnVolumeGroupSnapshotBeingCreated(
// removeAnnVolumeGroupSnapshotBeingCreated removes the VolumeGroupSnapshotBeingCreated
// annotation from a groupSnapshotContent if one is present.
-func (ctrl csiSnapshotSideCarController) removeAnnVolumeGroupSnapshotBeingCreated(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl csiSnapshotSideCarController) removeAnnVolumeGroupSnapshotBeingCreated(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
if !metav1.HasAnnotation(groupSnapshotContent.ObjectMeta, utils.AnnVolumeGroupSnapshotBeingCreated) {
// the annotation does not exist, return directly
return groupSnapshotContent, nil
@@ -579,32 +578,34 @@ func (ctrl csiSnapshotSideCarController) removeAnnVolumeGroupSnapshotBeingCreate
}
func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentStatus(
- groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent,
+ groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent,
groupSnapshotHandle string,
readyToUse bool,
createdAt metav1.Time,
- snapshotContentLinks []snapshotContentNameVolumeHandlePair,
-) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+ snapshotList []*csi.Snapshot,
+) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
klog.V(5).Infof("updateGroupSnapshotContentStatus: updating VolumeGroupSnapshotContent [%s], groupSnapshotHandle %s, readyToUse %v, createdAt %v", groupSnapshotContent.Name, groupSnapshotHandle, readyToUse, createdAt)
- groupSnapshotContentObj, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().Get(context.TODO(), groupSnapshotContent.Name, metav1.GetOptions{})
+ groupSnapshotContentObj, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Get(context.TODO(), groupSnapshotContent.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error get group snapshot content %s from api server: %v", groupSnapshotContent.Name, err)
}
- var newStatus *crdv1beta1.VolumeGroupSnapshotContentStatus
+ var newStatus *crdv1beta2.VolumeGroupSnapshotContentStatus
updated := false
if groupSnapshotContentObj.Status == nil {
- newStatus = &crdv1beta1.VolumeGroupSnapshotContentStatus{
+ newStatus = &crdv1beta2.VolumeGroupSnapshotContentStatus{
VolumeGroupSnapshotHandle: &groupSnapshotHandle,
ReadyToUse: &readyToUse,
CreationTime: &createdAt,
}
- for _, snapshotContentLink := range snapshotContentLinks {
-
- newStatus.VolumeSnapshotHandlePairList = append(newStatus.VolumeSnapshotHandlePairList, crdv1beta1.VolumeSnapshotHandlePair{
- VolumeHandle: snapshotContentLink.volumeHandle,
- SnapshotHandle: snapshotContentLink.snapshotHandle,
+ for _, snapshot := range snapshotList {
+ newStatus.VolumeSnapshotInfoList = append(newStatus.VolumeSnapshotInfoList, crdv1beta2.VolumeSnapshotInfo{
+ VolumeHandle: snapshot.SourceVolumeId,
+ SnapshotHandle: snapshot.SnapshotId,
+ CreationTime: utils.CSITimestampToKubernetes(snapshot.CreationTime),
+ ReadyToUse: &snapshot.ReadyToUse,
+ RestoreSize: utils.CSISizeToKubernetes(snapshot.SizeBytes),
})
}
@@ -626,11 +627,14 @@ func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentStatus(
newStatus.CreationTime = &createdAt
updated = true
}
- if len(newStatus.VolumeSnapshotHandlePairList) == 0 {
- for _, snapshotContentLink := range snapshotContentLinks {
- newStatus.VolumeSnapshotHandlePairList = append(newStatus.VolumeSnapshotHandlePairList, crdv1beta1.VolumeSnapshotHandlePair{
- VolumeHandle: snapshotContentLink.volumeHandle,
- SnapshotHandle: snapshotContentLink.snapshotHandle,
+ if len(newStatus.VolumeSnapshotInfoList) == 0 {
+ for _, snapshot := range snapshotList {
+ newStatus.VolumeSnapshotInfoList = append(newStatus.VolumeSnapshotInfoList, crdv1beta2.VolumeSnapshotInfo{
+ VolumeHandle: snapshot.SourceVolumeId,
+ SnapshotHandle: snapshot.SnapshotId,
+ CreationTime: utils.CSITimestampToKubernetes(snapshot.CreationTime),
+ ReadyToUse: &snapshot.ReadyToUse,
+ RestoreSize: utils.CSISizeToKubernetes(snapshot.SizeBytes),
})
}
updated = true
@@ -640,7 +644,7 @@ func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentStatus(
if updated {
groupSnapshotContentClone := groupSnapshotContentObj.DeepCopy()
groupSnapshotContentClone.Status = newStatus
- newContent, err := ctrl.clientset.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().UpdateStatus(context.TODO(), groupSnapshotContentClone, metav1.UpdateOptions{})
+ newContent, err := ctrl.clientset.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().UpdateStatus(context.TODO(), groupSnapshotContentClone, metav1.UpdateOptions{})
if err != nil {
return groupSnapshotContentObj, newControllerUpdateError(groupSnapshotContent.Name, err.Error())
}
@@ -657,7 +661,7 @@ func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentStatus(
//
// * groupSnapshotContent - group snapshot content to update
// * eventtype, reason, message - event to send, see EventRecorder.Event()
-func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentErrorStatusWithEvent(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent, eventtype, reason, message string) error {
+func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentErrorStatusWithEvent(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent, eventtype, reason, message string) error {
klog.V(5).Infof("updateGroupSnapshotContentErrorStatusWithEvent[%s]", groupSnapshotContent.Name)
if groupSnapshotContent.Status != nil && groupSnapshotContent.Status.Error != nil && *groupSnapshotContent.Status.Error.Message == message {
@@ -678,7 +682,7 @@ func (ctrl *csiSnapshotSideCarController) updateGroupSnapshotContentErrorStatusW
patches = append(patches, utils.PatchOp{
Op: "replace",
Path: "/status",
- Value: &crdv1beta1.VolumeGroupSnapshotContentStatus{
+ Value: &crdv1beta2.VolumeGroupSnapshotContentStatus{
ReadyToUse: &ready,
Error: groupSnapshotContentStatusError,
},
@@ -728,7 +732,7 @@ func GetSnapshotContentNameForVolumeGroupSnapshotContent(groupSnapshotContentUUI
return fmt.Sprintf("snapcontent-%x-%s", sha256.Sum256([]byte(groupSnapshotContentUUID+pvUUID)), time.Now().Format("2006-01-02-3.4.5"))
}
-func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStatus(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) error {
+func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStatus(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) error {
klog.V(5).Infof("checkandUpdateGroupSnapshotContentStatus[%s] started", groupSnapshotContent.Name)
groupSnapshotContentObj, err := ctrl.checkandUpdateGroupSnapshotContentStatusOperation(groupSnapshotContent)
if err != nil {
@@ -745,7 +749,7 @@ func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStat
return nil
}
-func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStatusOperation(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStatusOperation(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
var err error
var creationTime time.Time
readyToUse := false
@@ -793,7 +797,7 @@ func (ctrl *csiSnapshotSideCarController) checkandUpdateGroupSnapshotContentStat
}
// TODO: Get a reference to snapshot contents for this volume group snapshot
- updatedContent, err := ctrl.updateGroupSnapshotContentStatus(groupSnapshotContent, groupSnapshotID, readyToUse, metav1.NewTime(creationTime), []snapshotContentNameVolumeHandlePair{})
+ updatedContent, err := ctrl.updateGroupSnapshotContentStatus(groupSnapshotContent, groupSnapshotID, readyToUse, metav1.NewTime(creationTime), []*csi.Snapshot{})
if err != nil {
return groupSnapshotContent, err
}
diff --git a/pkg/sidecar-controller/groupsnapshot_helper_test.go b/pkg/sidecar-controller/groupsnapshot_helper_test.go
index 86af6f7dc..cd19249bb 100644
--- a/pkg/sidecar-controller/groupsnapshot_helper_test.go
+++ b/pkg/sidecar-controller/groupsnapshot_helper_test.go
@@ -3,7 +3,7 @@ package sidecar_controller
import (
"testing"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
v1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -36,9 +36,9 @@ func TestDeleteCSIGroupSnapshotOperation(t *testing.T) {
if err == nil {
t.Errorf("expected deleteCSIGroupSnapshotOperation to return error when groupsnapshotContent is nil: %v", err)
}
- gsc := crdv1beta1.VolumeGroupSnapshotContent{
- Status: &crdv1beta1.VolumeGroupSnapshotContentStatus{
- VolumeSnapshotHandlePairList: []crdv1beta1.VolumeSnapshotHandlePair{
+ gsc := crdv1beta2.VolumeGroupSnapshotContent{
+ Status: &crdv1beta2.VolumeGroupSnapshotContentStatus{
+ VolumeSnapshotInfoList: []crdv1beta2.VolumeSnapshotInfo{
{
VolumeHandle: "test-pv",
SnapshotHandle: "test-vsc",
diff --git a/pkg/sidecar-controller/snapshot_controller.go b/pkg/sidecar-controller/snapshot_controller.go
index 73b8a2c88..ec67be7cc 100644
--- a/pkg/sidecar-controller/snapshot_controller.go
+++ b/pkg/sidecar-controller/snapshot_controller.go
@@ -561,15 +561,6 @@ func (e controllerUpdateError) Error() string {
return e.message
}
-func isControllerUpdateFailError(err *crdv1.VolumeSnapshotError) bool {
- if err != nil {
- if strings.Contains(*err.Message, controllerUpdateFailMsg) {
- return true
- }
- }
- return false
-}
-
func (ctrl *csiSnapshotSideCarController) GetCredentialsFromAnnotation(content *crdv1.VolumeSnapshotContent) (map[string]string, error) {
// get secrets if VolumeSnapshotClass specifies it
var snapshotterCredentials map[string]string
diff --git a/pkg/sidecar-controller/snapshot_controller_base.go b/pkg/sidecar-controller/snapshot_controller_base.go
index 9b0a51a63..d25b0bb99 100644
--- a/pkg/sidecar-controller/snapshot_controller_base.go
+++ b/pkg/sidecar-controller/snapshot_controller_base.go
@@ -18,14 +18,17 @@ package sidecar_controller
import (
"fmt"
+ "sync"
"time"
+ "github.com/kubernetes-csi/external-snapshotter/v8/pkg/features"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/group_snapshotter"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -34,12 +37,12 @@ import (
"k8s.io/client-go/util/workqueue"
klog "k8s.io/klog/v2"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
- groupsnapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta1"
+ groupsnapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumegroupsnapshot/v1beta2"
snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v8/informers/externalversions/volumesnapshot/v1"
- groupsnapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta1"
+ groupsnapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumegroupsnapshot/v1beta2"
snapshotlisters "github.com/kubernetes-csi/external-snapshotter/client/v8/listers/volumesnapshot/v1"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/snapshotter"
"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
@@ -167,7 +170,7 @@ func NewCSISnapshotSideCarController(
return ctrl
}
-func (ctrl *csiSnapshotSideCarController) Run(workers int, stopCh <-chan struct{}) {
+func (ctrl *csiSnapshotSideCarController) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup) {
defer ctrl.contentQueue.ShutDown()
klog.Infof("Starting CSI snapshotter")
@@ -185,10 +188,27 @@ func (ctrl *csiSnapshotSideCarController) Run(workers int, stopCh <-chan struct{
ctrl.initializeCaches()
- for i := 0; i < workers; i++ {
- go wait.Until(ctrl.contentWorker, 0, stopCh)
- if ctrl.enableVolumeGroupSnapshots {
- go wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ if utilfeature.DefaultFeatureGate.Enabled(features.ReleaseLeaderElectionOnExit) {
+ for i := 0; i < workers; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.contentWorker, 0, stopCh)
+ }()
+ if ctrl.enableVolumeGroupSnapshots {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ }()
+ }
+ }
+ } else {
+ for i := 0; i < workers; i++ {
+ go wait.Until(ctrl.contentWorker, 0, stopCh)
+ if ctrl.enableVolumeGroupSnapshots {
+ go wait.Until(ctrl.groupSnapshotContentWorker, 0, stopCh)
+ }
}
}
@@ -333,7 +353,7 @@ func (ctrl *csiSnapshotSideCarController) isDriverMatch(object interface{}) bool
}
return true
}
- if content, ok := object.(*crdv1beta1.VolumeGroupSnapshotContent); ok {
+ if content, ok := object.(*crdv1beta2.VolumeGroupSnapshotContent); ok {
if content.Spec.Source.GroupSnapshotHandles == nil && len(content.Spec.Source.VolumeHandles) == 0 {
// Skip this group snapshot content if it does not have a valid source
return false
diff --git a/pkg/utils/conversion.go b/pkg/utils/conversion.go
new file mode 100644
index 000000000..32ac41601
--- /dev/null
+++ b/pkg/utils/conversion.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import "google.golang.org/protobuf/types/known/timestamppb"
+
+// CSITimestampToKubernetes converts a CSI timestamp to the format
+// used by the Snapshot API (Unix time in nanoseconds)
+func CSITimestampToKubernetes(creationTime *timestamppb.Timestamp) *int64 {
+ if creationTime == nil {
+ return nil
+ }
+
+ result := creationTime.AsTime().UnixNano()
+ return &result
+}
+
+// CSISizeToKubernetes converts a CSI size in bytes to the format used by
+// the Snapshot API; a size of zero is treated as unknown and maps to nil
+func CSISizeToKubernetes(sizeBytes int64) *int64 {
+ if sizeBytes == 0 {
+ return nil
+ }
+
+ return &sizeBytes
+}
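
Editor's note: a brief usage sketch for the two helpers, mirroring how the status-update path earlier in this diff fills `VolumeSnapshotInfo` from a `csi.Snapshot`; the values are fabricated for illustration:

```go
package main

import (
	"fmt"

	"github.com/kubernetes-csi/external-snapshotter/v8/pkg/utils"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// A non-nil CSI timestamp becomes Unix nanoseconds.
	if t := utils.CSITimestampToKubernetes(timestamppb.Now()); t != nil {
		fmt.Println("creation time (ns):", *t)
	}

	// nil timestamps and zero sizes map to nil, leaving the optional
	// API fields unset rather than recording a bogus value.
	fmt.Println(utils.CSITimestampToKubernetes(nil) == nil) // true
	fmt.Println(utils.CSISizeToKubernetes(0) == nil)        // true
}
```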
diff --git a/pkg/utils/conversion_test.go b/pkg/utils/conversion_test.go
new file mode 100644
index 000000000..35e61d63d
--- /dev/null
+++ b/pkg/utils/conversion_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "testing"
+
+ "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+func TestCsiSizeToKubernetes(t *testing.T) {
+ if CSISizeToKubernetes(0) != nil {
+ t.Error("Expected nil")
+ }
+
+ if *CSISizeToKubernetes(123) != 123 {
+ t.Error("Wrong size conversion")
+ }
+}
+
+func TestCsiTimestampToKubernetes(t *testing.T) {
+ if CSITimestampToKubernetes(nil) != nil {
+ t.Error("Expected nil")
+ }
+
+ now := timestamppb.Now()
+ if *CSITimestampToKubernetes(now) != now.AsTime().UnixNano() {
+ t.Error("Expected correct time conversion")
+ }
+}
diff --git a/pkg/utils/patch.go b/pkg/utils/patch.go
index 7326e2867..8f08d0bae 100644
--- a/pkg/utils/patch.go
+++ b/pkg/utils/patch.go
@@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
clientset "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -60,17 +60,17 @@ func PatchVolumeSnapshot(
// PatchVolumeGroupSnapshot patches a volume group snapshot object
func PatchVolumeGroupSnapshot(
- existingGroupSnapshot *crdv1beta1.VolumeGroupSnapshot,
+ existingGroupSnapshot *crdv1beta2.VolumeGroupSnapshot,
patch []PatchOp,
client clientset.Interface,
subresources ...string,
-) (*crdv1beta1.VolumeGroupSnapshot, error) {
+) (*crdv1beta2.VolumeGroupSnapshot, error) {
data, err := json.Marshal(patch)
if nil != err {
return existingGroupSnapshot, err
}
- newGroupSnapshot, err := client.GroupsnapshotV1beta1().VolumeGroupSnapshots(existingGroupSnapshot.Namespace).Patch(context.TODO(), existingGroupSnapshot.Name, types.JSONPatchType, data, metav1.PatchOptions{}, subresources...)
+ newGroupSnapshot, err := client.GroupsnapshotV1beta2().VolumeGroupSnapshots(existingGroupSnapshot.Namespace).Patch(context.TODO(), existingGroupSnapshot.Name, types.JSONPatchType, data, metav1.PatchOptions{}, subresources...)
if err != nil {
return existingGroupSnapshot, err
}
@@ -80,17 +80,17 @@ func PatchVolumeGroupSnapshot(
// PatchVolumeGroupSnapshotContent patches a volume group snapshot content object
func PatchVolumeGroupSnapshotContent(
- existingGroupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent,
+ existingGroupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent,
patch []PatchOp,
client clientset.Interface,
subresources ...string,
-) (*crdv1beta1.VolumeGroupSnapshotContent, error) {
+) (*crdv1beta2.VolumeGroupSnapshotContent, error) {
data, err := json.Marshal(patch)
if nil != err {
return existingGroupSnapshotContent, err
}
- newGroupSnapshotContent, err := client.GroupsnapshotV1beta1().VolumeGroupSnapshotContents().Patch(context.TODO(), existingGroupSnapshotContent.Name, types.JSONPatchType, data, metav1.PatchOptions{}, subresources...)
+ newGroupSnapshotContent, err := client.GroupsnapshotV1beta2().VolumeGroupSnapshotContents().Patch(context.TODO(), existingGroupSnapshotContent.Name, types.JSONPatchType, data, metav1.PatchOptions{}, subresources...)
if err != nil {
return existingGroupSnapshotContent, err
}
diff --git a/pkg/utils/util.go b/pkg/utils/util.go
index f4f82b34a..7cdac6523 100644
--- a/pkg/utils/util.go
+++ b/pkg/utils/util.go
@@ -35,7 +35,7 @@ import (
"k8s.io/client-go/tools/cache"
klog "k8s.io/klog/v2"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
)
@@ -403,7 +403,7 @@ func GetSecretReference(secretParams secretParamsMap, snapshotClassParams map[st
}
// GetGroupSnapshotSecretReference is the group snapshot counterpart of GetSecretReference
-func GetGroupSnapshotSecretReference(secretParams secretParamsMap, volumeGroupSnapshotClassParams map[string]string, groupSnapContentName string, volumeGroupSnapshot *crdv1beta1.VolumeGroupSnapshot) (*v1.SecretReference, error) {
+func GetGroupSnapshotSecretReference(secretParams secretParamsMap, volumeGroupSnapshotClassParams map[string]string, groupSnapContentName string, volumeGroupSnapshot *crdv1beta2.VolumeGroupSnapshot) (*v1.SecretReference, error) {
nameTemplate, namespaceTemplate, err := verifyAndGetSecretNameAndNamespaceTemplate(secretParams, volumeGroupSnapshotClassParams)
if err != nil {
return nil, fmt.Errorf("failed to get name and namespace template from params: %v", err)
@@ -502,7 +502,7 @@ func NeedToAddContentFinalizer(content *crdv1.VolumeSnapshotContent) bool {
}
// NeedToAddGroupSnapshotContentFinalizer checks if a Finalizer needs to be added for the volume group snapshot content.
-func NeedToAddGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta1.VolumeGroupSnapshotContent) bool {
+func NeedToAddGroupSnapshotContentFinalizer(groupSnapshotContent *crdv1beta2.VolumeGroupSnapshotContent) bool {
return groupSnapshotContent.ObjectMeta.DeletionTimestamp == nil && !slices.Contains(groupSnapshotContent.ObjectMeta.Finalizers, VolumeGroupSnapshotContentFinalizer)
}
@@ -514,7 +514,7 @@ func IsSnapshotDeletionCandidate(snapshot *crdv1.VolumeSnapshot) bool {
// IsGroupSnapshotDeletionCandidate checks if a volume group snapshot's deletionTimestamp
// is set and the bound finalizer is present on the group snapshot.
-func IsGroupSnapshotDeletionCandidate(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) bool {
+func IsGroupSnapshotDeletionCandidate(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) bool {
return groupSnapshot.ObjectMeta.DeletionTimestamp != nil && slices.Contains(groupSnapshot.ObjectMeta.Finalizers, VolumeGroupSnapshotBoundFinalizer)
}
@@ -529,7 +529,7 @@ func NeedToAddSnapshotBoundFinalizer(snapshot *crdv1.VolumeSnapshot) bool {
}
// NeedToAddGroupSnapshotBoundFinalizer checks if a Finalizer needs to be added for the bound volume group snapshot.
-func NeedToAddGroupSnapshotBoundFinalizer(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) bool {
+func NeedToAddGroupSnapshotBoundFinalizer(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) bool {
return groupSnapshot.ObjectMeta.DeletionTimestamp == nil && !slices.Contains(groupSnapshot.ObjectMeta.Finalizers, VolumeGroupSnapshotBoundFinalizer) && IsBoundVolumeGroupSnapshotContentNameSet(groupSnapshot)
}
@@ -583,7 +583,7 @@ func GetSnapshotStatusForLogging(snapshot *crdv1.VolumeSnapshot) string {
return fmt.Sprintf("bound to: %q, Completed: %v", snapshotContentName, ready)
}
-func GetGroupSnapshotStatusForLogging(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) string {
+func GetGroupSnapshotStatusForLogging(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) string {
groupSnapshotContentName := ""
if groupSnapshot.Status != nil && groupSnapshot.Status.BoundVolumeGroupSnapshotContentName != nil {
groupSnapshotContentName = *groupSnapshot.Status.BoundVolumeGroupSnapshotContentName
@@ -623,7 +623,7 @@ func IsSnapshotCreated(snapshot *crdv1.VolumeSnapshot) bool {
return snapshot.Status != nil && snapshot.Status.CreationTime != nil
}
-func GroupSnapshotKey(vgs *crdv1beta1.VolumeGroupSnapshot) string {
+func GroupSnapshotKey(vgs *crdv1beta2.VolumeGroupSnapshot) string {
return fmt.Sprintf("%s/%s", vgs.Namespace, vgs.Name)
}
@@ -631,21 +631,21 @@ func GroupSnapshotRefKey(vgsref *v1.ObjectReference) string {
return fmt.Sprintf("%s/%s", vgsref.Namespace, vgsref.Name)
}
-func IsGroupSnapshotReady(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) bool {
+func IsGroupSnapshotReady(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) bool {
if groupSnapshot.Status == nil || groupSnapshot.Status.ReadyToUse == nil || *groupSnapshot.Status.ReadyToUse == false {
return false
}
return true
}
-func IsBoundVolumeGroupSnapshotContentNameSet(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) bool {
+func IsBoundVolumeGroupSnapshotContentNameSet(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) bool {
if groupSnapshot.Status == nil || groupSnapshot.Status.BoundVolumeGroupSnapshotContentName == nil || *groupSnapshot.Status.BoundVolumeGroupSnapshotContentName == "" {
return false
}
return true
}
-func IsVolumeGroupSnapshotRefSet(groupSnapshot *crdv1beta1.VolumeGroupSnapshot, content *crdv1beta1.VolumeGroupSnapshotContent) bool {
+func IsVolumeGroupSnapshotRefSet(groupSnapshot *crdv1beta2.VolumeGroupSnapshot, content *crdv1beta2.VolumeGroupSnapshotContent) bool {
if content.Spec.VolumeGroupSnapshotRef.Name == groupSnapshot.Name &&
content.Spec.VolumeGroupSnapshotRef.Namespace == groupSnapshot.Namespace &&
content.Spec.VolumeGroupSnapshotRef.UID == groupSnapshot.UID {
@@ -655,13 +655,13 @@ func IsVolumeGroupSnapshotRefSet(groupSnapshot *crdv1beta1.VolumeGroupSnapshot,
}
// IsGroupSnapshotCreated indicates that the group snapshot has been cut on a storage system
-func IsGroupSnapshotCreated(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) bool {
+func IsGroupSnapshotCreated(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) bool {
return groupSnapshot.Status != nil && groupSnapshot.Status.CreationTime != nil
}
// GetDynamicSnapshotContentNameForGroupSnapshot returns a unique content name for the
// passed in VolumeGroupSnapshot to dynamically provision a group snapshot.
-func GetDynamicSnapshotContentNameForGroupSnapshot(groupSnapshot *crdv1beta1.VolumeGroupSnapshot) string {
+func GetDynamicSnapshotContentNameForGroupSnapshot(groupSnapshot *crdv1beta2.VolumeGroupSnapshot) string {
return "groupsnapcontent-" + string(groupSnapshot.UID)
}
diff --git a/pkg/utils/vgs.go b/pkg/utils/vgs.go
index 1c29cbe60..4599d2a99 100644
--- a/pkg/utils/vgs.go
+++ b/pkg/utils/vgs.go
@@ -19,7 +19,7 @@ package utils
import (
"fmt"
- crdv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta1"
+ crdv1beta2 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1beta2"
crdv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -38,8 +38,8 @@ func getVolumeGroupSnapshotParentObjectName(snapshot *crdv1.VolumeSnapshot) stri
apiVersion := fmt.Sprintf(
"%s/%s",
- crdv1beta1.SchemeGroupVersion.Group,
- crdv1beta1.SchemeGroupVersion.Version,
+ crdv1beta2.SchemeGroupVersion.Group,
+ crdv1beta2.SchemeGroupVersion.Version,
)
for _, owner := range snapshot.ObjectMeta.OwnerReferences {
@@ -100,12 +100,12 @@ func NeedToAddVolumeGroupSnapshotOwnership(snapshot *crdv1.VolumeSnapshot) bool
// BuildVolumeGroupSnapshotOwnerReference creates a OwnerReference record declaring an
// object as a child of passed VolumeGroupSnapshot
-func BuildVolumeGroupSnapshotOwnerReference(parentGroup *crdv1beta1.VolumeGroupSnapshot) metav1.OwnerReference {
+func BuildVolumeGroupSnapshotOwnerReference(parentGroup *crdv1beta2.VolumeGroupSnapshot) metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: fmt.Sprintf(
"%s/%s",
- crdv1beta1.SchemeGroupVersion.Group,
- crdv1beta1.SchemeGroupVersion.Version,
+ crdv1beta2.SchemeGroupVersion.Group,
+ crdv1beta2.SchemeGroupVersion.Version,
),
Kind: "VolumeGroupSnapshot",
Name: parentGroup.Name,
diff --git a/pkg/utils/vgs_test.go b/pkg/utils/vgs_test.go
index a21826546..ec9b0dfdc 100644
--- a/pkg/utils/vgs_test.go
+++ b/pkg/utils/vgs_test.go
@@ -70,7 +70,7 @@ func TestIsVolumeSnapshotGroupMember(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: "test/v1beta1",
+ APIVersion: "test/v1beta2",
Kind: "VolumeGroupSnapshot",
Name: "vgs",
},
@@ -89,7 +89,7 @@ func TestIsVolumeSnapshotGroupMember(t *testing.T) {
Namespace: "default",
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: "groupsnapshot.storage.k8s.io/v1beta1",
+ APIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
Kind: "VolumeGroupSnapshot",
Name: "vgs",
},
@@ -191,7 +191,7 @@ func TestNeedToAddVolumeGroupSnapshotOwnership(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
{
- APIVersion: "groupsnapshot.storage.k8s.io/v1beta1",
+ APIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
Kind: "VolumeGroupSnapshot",
Name: "vgs",
},
diff --git a/pkg/webhook/certwatcher.go b/pkg/webhook/certwatcher.go
new file mode 100644
index 000000000..740f08d69
--- /dev/null
+++ b/pkg/webhook/certwatcher.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "context"
+ "crypto/tls"
+ "sync"
+
+ "github.com/fsnotify/fsnotify"
+ "k8s.io/klog/v2"
+)
+
+// This file originated from github.com/kubernetes-sigs/controller-runtime/pkg/webhook/internal/certwatcher.
+// We cannot import this package as it's an internal one. In addition, we cannot yet easily integrate
+// with controller-runtime/pkg/webhook directly, as it would require extensive rework:
+// https://github.com/kubernetes-csi/external-snapshotter/issues/422
+
+// CertWatcher watches certificate and key files for changes. When either file
+// changes, it reads and parses both and calls an optional callback with the new
+// certificate.
+type CertWatcher struct {
+ sync.Mutex
+
+ currentCert *tls.Certificate
+ watcher *fsnotify.Watcher
+
+ certPath string
+ keyPath string
+}
+
+// NewCertWatcher returns a new CertWatcher watching the given certificate and key.
+func NewCertWatcher(certPath, keyPath string) (*CertWatcher, error) {
+ var err error
+
+ cw := &CertWatcher{
+ certPath: certPath,
+ keyPath: keyPath,
+ }
+
+ // Initial read of certificate and key.
+ if err := cw.ReadCertificate(); err != nil {
+ return nil, err
+ }
+
+ cw.watcher, err = fsnotify.NewWatcher()
+ if err != nil {
+ return nil, err
+ }
+
+ return cw, nil
+}
+
+// GetCertificate fetches the currently loaded certificate, which may be nil.
+func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) {
+ cw.Lock()
+ defer cw.Unlock()
+ return cw.currentCert, nil
+}
+
+// Start starts the watch on the certificate and key files.
+func (cw *CertWatcher) Start(ctx context.Context) error {
+ files := []string{cw.certPath, cw.keyPath}
+
+ for _, f := range files {
+ if err := cw.watcher.Add(f); err != nil {
+ return err
+ }
+ }
+
+ go cw.Watch()
+
+ // Block until the context is done.
+ <-ctx.Done()
+
+ return cw.watcher.Close()
+}
+
+// Watch reads events from the watcher's channel and reacts to changes.
+func (cw *CertWatcher) Watch() {
+ for {
+ select {
+ case event, ok := <-cw.watcher.Events:
+ // Channel is closed.
+ if !ok {
+ return
+ }
+
+ cw.handleEvent(event)
+
+ case err, ok := <-cw.watcher.Errors:
+ // Channel is closed.
+ if !ok {
+ return
+ }
+
+ klog.Error(err, "certificate watch error")
+ }
+ }
+}
+
+// ReadCertificate reads the certificate and key files from disk, parses them,
+// and updates the current certificate on the watcher. If a callback is set, it
+// is invoked with the new certificate.
+func (cw *CertWatcher) ReadCertificate() error {
+ cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath)
+ if err != nil {
+ return err
+ }
+
+ cw.Lock()
+ cw.currentCert = &cert
+ cw.Unlock()
+
+ klog.Info("Updated current TLS certificate")
+
+ return nil
+}
+
+func (cw *CertWatcher) handleEvent(event fsnotify.Event) {
+ // Only care about events which may modify the contents of the file.
+ if !(isWrite(event) || isRemove(event) || isCreate(event)) {
+ return
+ }
+
+ klog.V(1).Info("certificate event", "event", event)
+
+ // If the file was removed, re-add the watch.
+ if isRemove(event) {
+ if err := cw.watcher.Add(event.Name); err != nil {
+ klog.Error(err, "error re-watching file")
+ }
+ }
+
+ if err := cw.ReadCertificate(); err != nil {
+ klog.Error(err, "error re-reading certificate")
+ }
+}
+
+func isWrite(event fsnotify.Event) bool {
+ return event.Op&fsnotify.Write == fsnotify.Write
+}
+
+func isCreate(event fsnotify.Event) bool {
+ return event.Op&fsnotify.Create == fsnotify.Create
+}
+
+func isRemove(event fsnotify.Event) bool {
+ return event.Op&fsnotify.Remove == fsnotify.Remove
+}
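
The CertWatcher above only pays off once its GetCertificate method is wired into a tls.Config, so rotated certificates are picked up on the next TLS handshake without restarting the server. A minimal sketch of that wiring, assuming the v8 module import path and placeholder certificate locations:

```go
package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http"

	// Import path assumed from the repository layout in this diff.
	"github.com/kubernetes-csi/external-snapshotter/v8/pkg/webhook"
)

func main() {
	// Placeholder paths for wherever the serving secret is mounted.
	cw, err := webhook.NewCertWatcher("/etc/webhook/certs/tls.crt", "/etc/webhook/certs/tls.key")
	if err != nil {
		log.Fatalf("loading initial certificate: %v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start blocks until ctx is done, so it runs in its own goroutine.
	go func() {
		if err := cw.Start(ctx); err != nil {
			log.Printf("certificate watcher stopped: %v", err)
		}
	}()

	srv := &http.Server{
		Addr: ":8443",
		// GetCertificate is consulted on every handshake, so a rotated
		// certificate on disk is served without a restart.
		TLSConfig: &tls.Config{GetCertificate: cw.GetCertificate},
	}
	// Empty file arguments: the certificate comes from TLSConfig.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```
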
diff --git a/pkg/webhook/config.go b/pkg/webhook/config.go
new file mode 100644
index 000000000..6f4ae6044
--- /dev/null
+++ b/pkg/webhook/config.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "crypto/tls"
+
+ "k8s.io/klog/v2"
+)
+
+// Config contains the server (the webhook) cert and key.
+type Config struct {
+ CertFile string
+ KeyFile string
+}
+
+func configTLS(config Config) *tls.Config {
+ sCert, err := tls.LoadX509KeyPair(config.CertFile, config.KeyFile)
+ if err != nil {
+ klog.Fatal(err)
+ }
+ return &tls.Config{
+ Certificates: []tls.Certificate{sCert},
+ }
+}
diff --git a/pkg/webhook/convert.go b/pkg/webhook/convert.go
new file mode 100644
index 000000000..6e8a08316
--- /dev/null
+++ b/pkg/webhook/convert.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "encoding/json"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/klog/v2"
+)
+
+const (
+ // volumeSnapshotInfoAnnotationName is the name of the annotation
+ // that is used when converting data from the v1beta2 to the v1beta1
+ // API to make the conversion reversible.
+ volumeSnapshotInfoAnnotationName = "groupsnapshot.storage.kubernetes.io/volume-snapshot-info-list"
+)
+
+func convertGroupSnapshotCRD(obj *unstructured.Unstructured, toVersion string) (*unstructured.Unstructured, metav1.Status) {
+ klog.V(2).Info("converting crd")
+
+ convertedObject := obj.DeepCopy()
+ fromVersion := obj.GetAPIVersion()
+ kind := obj.GetKind()
+
+ if toVersion == fromVersion {
+ return nil, statusErrorWithMessage("conversion from a version to itself should not call the webhook: %s", toVersion)
+ }
+
+ if kind != "VolumeGroupSnapshotContent" {
+ return nil, statusErrorWithMessage("unexpected conversion kind %q", kind)
+ }
+
+ const v1beta1Version = "groupsnapshot.storage.k8s.io/v1beta1"
+ const v1beta2Version = "groupsnapshot.storage.k8s.io/v1beta2"
+
+ switch {
+ case fromVersion == v1beta1Version && toVersion == v1beta2Version:
+ if err := convertVolumeGroupSnapshotContentFromV1beta1ToV1beta2(convertedObject); err != nil {
+ return nil, statusErrorWithMessage("%s", err.Error())
+ }
+
+ case fromVersion == v1beta2Version && toVersion == v1beta1Version:
+ if err := convertVolumeGroupSnapshotContentFromV1beta2ToV1beta1(convertedObject); err != nil {
+ return nil, statusErrorWithMessage("%s", err.Error())
+ }
+
+ default:
+ return nil, statusErrorWithMessage("unexpected conversion version from %q to %q", fromVersion, toVersion)
+ }
+
+ return convertedObject, statusSucceed()
+}
+
+func convertVolumeGroupSnapshotContentFromV1beta1ToV1beta2(obj *unstructured.Unstructured) error {
+ annotations := obj.GetAnnotations()
+ if value, ok := annotations[volumeSnapshotInfoAnnotationName]; ok {
+ // We use the annotation to fill the missing fields into the status
+ var slice any
+
+ if err := json.Unmarshal([]byte(value), &slice); err != nil {
+ return fmt.Errorf("unable to deserialize annotation %q: %w", volumeSnapshotInfoAnnotationName, err)
+ }
+
+ if err := unstructured.SetNestedSlice(
+ obj.Object,
+ slice.([]any),
+ "status",
+ "volumeSnapshotInfoList",
+ ); err != nil {
+ return fmt.Errorf("error while setting .status.volumeSnapshotInfoList: %w", err)
+ }
+
+ unstructured.RemoveNestedField(obj.Object, "status", "volumeSnapshotHandlePairList")
+
+ // Remove the tracking annotation
+ delete(annotations, volumeSnapshotInfoAnnotationName)
+ obj.SetAnnotations(annotations)
+ } else {
+ // Rename the old field with the new name
+ volumeSnapshotHandlePairList, found, err := unstructured.NestedSlice(obj.Object, "status", "volumeSnapshotHandlePairList")
+ if err != nil {
+ return fmt.Errorf("unable to traverse for .status.volumeSnapshotHandlePairList: %w", err)
+ }
+ if found {
+ if err := unstructured.SetNestedSlice(
+ obj.Object,
+ volumeSnapshotHandlePairList,
+ "status",
+ "volumeSnapshotInfoList",
+ ); err != nil {
+ return fmt.Errorf("error while setting .status.volumeSnapshotInfoList: %w", err)
+ }
+
+ unstructured.RemoveNestedField(obj.Object, "status", "volumeSnapshotHandlePairList")
+ }
+ }
+ return nil
+}
+
+func convertVolumeGroupSnapshotContentFromV1beta2ToV1beta1(obj *unstructured.Unstructured) error {
+ volumeSnapshotInfoList, found, err := unstructured.NestedSlice(obj.Object, "status", "volumeSnapshotInfoList")
+ if err != nil {
+ return fmt.Errorf("unable to traverse for .status.volumeSnapshotInfoList: %w", err)
+ }
+ if found {
+ // Step 1: attach the existing volumeSnapshotInfoList as an annotation
+ serializedField, err := json.Marshal(volumeSnapshotInfoList)
+ if err != nil {
+ return fmt.Errorf("error while serializing the volumeSnapshotInfoList field: %w", err)
+ }
+
+ annotations := obj.GetAnnotations()
+ if annotations == nil {
+ annotations = make(map[string]string)
+ }
+ annotations[volumeSnapshotInfoAnnotationName] = string(serializedField)
+ obj.SetAnnotations(annotations)
+
+ // Step 2: convert volumeSnapshotInfoList to volumeSnapshotHandlePairList
+ for i, entry := range volumeSnapshotInfoList {
+ if mapEntry, ok := entry.(map[string]interface{}); ok {
+ delete(mapEntry, "creationTime")
+ delete(mapEntry, "readyToUse")
+ delete(mapEntry, "restoreSize")
+ } else {
+ return fmt.Errorf("unexpected content in .status.volumeSnapshotInfoList[%d]: expected map", i)
+ }
+ }
+
+ if err := unstructured.SetNestedSlice(
+ obj.Object,
+ volumeSnapshotInfoList,
+ "status",
+ "volumeSnapshotHandlePairList",
+ ); err != nil {
+ return fmt.Errorf("error while setting .status.volumeSnapshotHandlePairList: %w", err)
+ }
+
+ // Step 3: remove volumeSnapshotInfoList
+ unstructured.RemoveNestedField(obj.Object, "status", "volumeSnapshotInfoList")
+ }
+
+ return nil
+}
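
The two converters above are built to round-trip: downgrading to v1beta1 stashes the per-snapshot creationTime, readyToUse, and restoreSize fields in the tracking annotation, and upgrading back to v1beta2 restores them from it. Since the helpers are unexported, the sketch below is written as an in-package test; the object contents are made up for illustration:

```go
package webhook

import (
	"testing"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// Hypothetical round-trip sketch: v1beta2 -> v1beta1 strips the richer
// status fields into the tracking annotation, v1beta1 -> v1beta2 restores
// them. The handles and status values are invented.
func TestConversionRoundTripSketch(t *testing.T) {
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "groupsnapshot.storage.k8s.io/v1beta2",
		"kind":       "VolumeGroupSnapshotContent",
		"metadata":   map[string]any{"name": "demo"},
		"status": map[string]any{
			"volumeSnapshotInfoList": []any{map[string]any{
				"snapshotHandle": "snap-1",
				"volumeHandle":   "vol-1",
				"readyToUse":     true,
			}},
		},
	}}

	if err := convertVolumeGroupSnapshotContentFromV1beta2ToV1beta1(obj); err != nil {
		t.Fatal(err)
	}
	pairs, _, _ := unstructured.NestedSlice(obj.Object, "status", "volumeSnapshotHandlePairList")
	if len(pairs) != 1 {
		t.Fatalf("expected one handle pair, got %d", len(pairs))
	}
	if _, kept := pairs[0].(map[string]any)["readyToUse"]; kept {
		t.Fatal("readyToUse should have been stripped from the v1beta1 pair list")
	}

	if err := convertVolumeGroupSnapshotContentFromV1beta1ToV1beta2(obj); err != nil {
		t.Fatal(err)
	}
	infos, _, _ := unstructured.NestedSlice(obj.Object, "status", "volumeSnapshotInfoList")
	if ready, _ := infos[0].(map[string]any)["readyToUse"].(bool); !ready {
		t.Fatal("readyToUse was not restored from the annotation")
	}
}
```
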
diff --git a/pkg/webhook/convert_test.go b/pkg/webhook/convert_test.go
new file mode 100644
index 000000000..9d73c6c83
--- /dev/null
+++ b/pkg/webhook/convert_test.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "sigs.k8s.io/yaml"
+)
+
+func TestFromBeta1ToBeta2(t *testing.T) {
+ matches, _ := filepath.Glob("testdata/v1beta1_to_v1beta2/*v1beta1*yaml")
+ for _, beta1FileName := range matches {
+ beta2FileName := strings.ReplaceAll(beta1FileName, "v1beta1.yaml", "v1beta2.yaml")
+
+ t.Run(fmt.Sprintf("%s -> %s", beta1FileName, beta2FileName), func(t *testing.T) {
+ from := fromFile(t, beta1FileName)
+ to := fromFile(t, beta2FileName)
+
+ err := convertVolumeGroupSnapshotContentFromV1beta1ToV1beta2(from)
+ if err != nil {
+ t.Fatalf("conversion failed: %v", err.Error())
+ }
+
+ // The API version is changed by the framework, here we emulate it
+ from.SetAPIVersion(to.GetAPIVersion())
+
+ if !equality.Semantic.DeepEqual(from, to) {
+ fromJSON, _ := json.MarshalIndent(from, "", " ")
+ toJSON, _ := json.MarshalIndent(to, "", " ")
+ t.Errorf("unexpected result %v vs %v", string(fromJSON), string(toJSON))
+ }
+ })
+ }
+}
+
+func TestFromBeta2ToBeta1(t *testing.T) {
+ matches, _ := filepath.Glob("testdata/v1beta2_to_v1beta1/*v1beta1*yaml")
+ for _, beta1FileName := range matches {
+ beta2FileName := strings.ReplaceAll(beta1FileName, "v1beta1.yaml", "v1beta2.yaml")
+
+ t.Run(fmt.Sprintf("%s -> %s", beta2FileName, beta1FileName), func(t *testing.T) {
+ from := fromFile(t, beta2FileName)
+ to := fromFile(t, beta1FileName)
+
+ err := convertVolumeGroupSnapshotContentFromV1beta2ToV1beta1(from)
+ if err != nil {
+ t.Fatalf("conversion failed: %v", err.Error())
+ }
+
+ // The API version is changed by the framework, here we emulate it
+ from.SetAPIVersion(to.GetAPIVersion())
+
+ if !equality.Semantic.DeepEqual(from, to) {
+ fromJSON, _ := json.MarshalIndent(from, "", " ")
+ toJSON, _ := json.MarshalIndent(to, "", " ")
+ t.Errorf("unexpected result %v vs %v", string(fromJSON), string(toJSON))
+ }
+ })
+ }
+}
+
+func fromFile(t *testing.T, fileName string) *unstructured.Unstructured {
+ file, err := os.Open(fileName)
+ if err != nil {
+ t.Fatalf("opening file %q: %v", fileName, err)
+ }
+
+ defer func() {
+ _ = file.Close()
+ }()
+
+ obj := &unstructured.Unstructured{
+ Object: map[string]any{},
+ }
+ data, err := io.ReadAll(file)
+ if err != nil {
+ t.Fatalf("reading file %q: %v", fileName, err)
+ }
+
+ err = yaml.Unmarshal(data, &obj.Object)
+ if err != nil {
+ t.Fatalf("unmarshalling JSON from file %q: %v", fileName, err)
+ }
+
+ return obj
+}
diff --git a/pkg/webhook/framework.go b/pkg/webhook/framework.go
new file mode 100644
index 000000000..36b7e1df5
--- /dev/null
+++ b/pkg/webhook/framework.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+// Important: this code is largely based on
+// https://github.com/kubernetes/kubernetes/blob/434bfd82814af038ad94d62ebe59b133fcb50506/test/images/agnhost/crd-conversion-webhook/converter/framework.go
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/munnerz/goautoneg"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// convertFunc is the user defined function for any conversion. The code in this file is a
+// template that can be used for any CR conversion given this function.
+type convertFunc func(Object *unstructured.Unstructured, version string) (*unstructured.Unstructured, metav1.Status)
+
+func statusErrorWithMessage(msg string, params ...interface{}) metav1.Status {
+ return metav1.Status{
+ Message: fmt.Sprintf(msg, params...),
+ Status: metav1.StatusFailure,
+ }
+}
+
+func statusSucceed() metav1.Status {
+ return metav1.Status{
+ Status: metav1.StatusSuccess,
+ }
+}
+
+// doConversionV1 converts the requested objects in the v1 ConversionRequest using the given conversion function and
+// returns a conversion response. Failures are reported with the Reason in the conversion response.
+func doConversionV1(convertRequest *v1.ConversionRequest, convert convertFunc) *v1.ConversionResponse {
+ var convertedObjects []runtime.RawExtension
+ for _, obj := range convertRequest.Objects {
+ cr := unstructured.Unstructured{}
+ if err := cr.UnmarshalJSON(obj.Raw); err != nil {
+ klog.Error(err)
+ return &v1.ConversionResponse{
+ Result: metav1.Status{
+ Message: fmt.Sprintf("failed to unmarshall object (%v) with error: %v", string(obj.Raw), err),
+ Status: metav1.StatusFailure,
+ },
+ }
+ }
+ convertedCR, status := convert(&cr, convertRequest.DesiredAPIVersion)
+ if status.Status != metav1.StatusSuccess {
+ klog.Error(status.String())
+ return &v1.ConversionResponse{
+ Result: status,
+ }
+ }
+ convertedCR.SetAPIVersion(convertRequest.DesiredAPIVersion)
+ convertedObjects = append(convertedObjects, runtime.RawExtension{Object: convertedCR})
+ }
+ return &v1.ConversionResponse{
+ ConvertedObjects: convertedObjects,
+ Result: statusSucceed(),
+ }
+}
+
+func serve(w http.ResponseWriter, r *http.Request, convert convertFunc) {
+ var body []byte
+ if r.Body != nil {
+ if data, err := io.ReadAll(r.Body); err == nil {
+ body = data
+ }
+ }
+
+ contentType := r.Header.Get("Content-Type")
+ serializer := getInputSerializer(contentType)
+ if serializer == nil {
+ msg := fmt.Sprintf("invalid Content-Type header `%s`", contentType)
+ klog.Errorf("%s", msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ klog.V(2).Infof("handling request: %v", body)
+ obj, gvk, err := serializer.Decode(body, nil, nil)
+ if err != nil {
+ msg := fmt.Sprintf("failed to deserialize body (%v) with error %v", string(body), err)
+ klog.Error(err)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ var responseObj runtime.Object
+ switch *gvk {
+ case v1.SchemeGroupVersion.WithKind("ConversionReview"):
+ convertReview, ok := obj.(*v1.ConversionReview)
+ if !ok {
+ msg := fmt.Sprintf("Expected v1.ConversionReview but got: %T", obj)
+ klog.Errorf("%s", msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+ convertReview.Response = doConversionV1(convertReview.Request, convert)
+ convertReview.Response.UID = convertReview.Request.UID
+ klog.V(2).Info(fmt.Sprintf("sending response: %v", convertReview.Response))
+
+ // reset the request; it is not needed in a response.
+ convertReview.Request = &v1.ConversionRequest{}
+ responseObj = convertReview
+ default:
+ msg := fmt.Sprintf("Unsupported group version kind: %v", gvk)
+ klog.Errorf("%s", msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ accept := r.Header.Get("Accept")
+ outSerializer := getOutputSerializer(accept)
+ if outSerializer == nil {
+ msg := fmt.Sprintf("invalid accept header `%s`", accept)
+ klog.Errorf("%s", msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+ err = outSerializer.Encode(responseObj, w)
+ if err != nil {
+ klog.Error(err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+}
+
+type mediaType struct {
+ Type, SubType string
+}
+
+var scheme = runtime.NewScheme()
+
+func init() {
+ addToScheme(scheme)
+}
+
+func addToScheme(scheme *runtime.Scheme) {
+ utilruntime.Must(v1.AddToScheme(scheme))
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+}
+
+var serializers = map[mediaType]runtime.Serializer{
+ {"application", "json"}: json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false),
+ {"application", "yaml"}: json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme),
+}
+
+func getInputSerializer(contentType string) runtime.Serializer {
+ parts := strings.SplitN(contentType, "/", 2)
+ if len(parts) != 2 {
+ return nil
+ }
+ return serializers[mediaType{parts[0], parts[1]}]
+}
+
+func getOutputSerializer(accept string) runtime.Serializer {
+ if len(accept) == 0 {
+ return serializers[mediaType{"application", "json"}]
+ }
+
+ clauses := goautoneg.ParseAccept(accept)
+ for _, clause := range clauses {
+ for k, v := range serializers {
+ switch {
+ case clause.Type == k.Type && clause.SubType == k.SubType,
+ clause.Type == k.Type && clause.SubType == "*",
+ clause.Type == "*" && clause.SubType == "*":
+ return v
+ }
+ }
+ }
+
+ return nil
+}
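
getOutputSerializer honors the client's Accept header via goautoneg, falling back to JSON when the header is empty and returning nil (which serve() reports as HTTP 400) when nothing matches. A small in-package sketch of those three cases:

```go
package webhook

import "testing"

// Hypothetical checks of the Accept-header negotiation performed by
// getOutputSerializer via goautoneg.ParseAccept.
func TestAcceptNegotiationSketch(t *testing.T) {
	// An empty Accept header falls back to the JSON serializer.
	if getOutputSerializer("") == nil {
		t.Fatal("expected JSON fallback for empty Accept header")
	}
	// Wildcards match any registered media type.
	if getOutputSerializer("*/*") == nil {
		t.Fatal("expected a serializer for */*")
	}
	// Unsupported media types yield nil, which serve() turns into HTTP 400.
	if getOutputSerializer("text/html") != nil {
		t.Fatal("expected no serializer for text/html")
	}
}
```
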
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta1.yaml
new file mode 100644
index 000000000..294b86904
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta1.yaml
@@ -0,0 +1,25 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations:
+ groupsnapshot.storage.kubernetes.io/volume-snapshot-info-list: |
+ [
+ {
+ "snapshotHandle": "feae7ce1-d339-11ef-9750-6a7695ce0383",
+ "volumeHandle": "72a7e66d-d337-11ef-9750-6a7695ce0383",
+ "creationTime": 23,
+ "readyToUse": true,
+ "restoreSize": 12
+ },
+ {
+ "snapshotHandle": "ff0f30a2-d339-11ef-9750-6a7695ce0383",
+ "volumeHandle": "72a89a79-d337-11ef-9750-6a7695ce0383",
+ "creationTime": 23,
+ "readyToUse": true,
+ "restoreSize": 12
+ }
+ ]
+spec: {}
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta2.yaml
new file mode 100644
index 000000000..cbd26898c
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_no_status_v1beta2.yaml
@@ -0,0 +1,19 @@
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations: {}
+spec: {}
+status:
+ volumeSnapshotInfoList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta1.yaml
new file mode 100644
index 000000000..2aa648c34
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta1.yaml
@@ -0,0 +1,33 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations:
+ groupsnapshot.storage.kubernetes.io/volume-snapshot-info-list: |
+ [
+ {
+ "snapshotHandle": "feae7ce1-d339-11ef-9750-6a7695ce0383",
+ "volumeHandle": "72a7e66d-d337-11ef-9750-6a7695ce0383",
+ "creationTime": 23,
+ "readyToUse": true,
+ "restoreSize": 12
+ },
+ {
+ "snapshotHandle": "ff0f30a2-d339-11ef-9750-6a7695ce0383",
+ "volumeHandle": "72a89a79-d337-11ef-9750-6a7695ce0383",
+ "creationTime": 23,
+ "readyToUse": true,
+ "restoreSize": 12
+ }
+ ]
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotHandlePairList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta2.yaml
new file mode 100644
index 000000000..f370fe5b9
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/annotation_status_v1beta2.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations: {}
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotInfoList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta1.yaml
new file mode 100644
index 000000000..2c4621461
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta1.yaml
@@ -0,0 +1,7 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta2.yaml
new file mode 100644
index 000000000..c0e9e65f9
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_no_status_v1beta2.yaml
@@ -0,0 +1,8 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta1.yaml
new file mode 100644
index 000000000..7685410c2
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta1.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotHandlePairList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+
diff --git a/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta2.yaml
new file mode 100644
index 000000000..bc28cbd79
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta1_to_v1beta2/no_annotation_status_v1beta2.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotInfoList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
diff --git a/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta1.yaml
new file mode 100644
index 000000000..e66e1a239
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta1.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations:
+ groupsnapshot.storage.kubernetes.io/volume-snapshot-info-list: "[{\"creationTime\":23,\"readyToUse\":true,\"restoreSize\":12,\"snapshotHandle\":\"feae7ce1-d339-11ef-9750-6a7695ce0383\",\"volumeHandle\":\"72a7e66d-d337-11ef-9750-6a7695ce0383\"},{\"creationTime\":23,\"readyToUse\":true,\"restoreSize\":12,\"snapshotHandle\":\"ff0f30a2-d339-11ef-9750-6a7695ce0383\",\"volumeHandle\":\"72a89a79-d337-11ef-9750-6a7695ce0383\"}]"
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotHandlePairList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+
diff --git a/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta2.yaml
new file mode 100644
index 000000000..f370fe5b9
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta2_to_v1beta1/annotation_status_v1beta2.yaml
@@ -0,0 +1,21 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+ annotations: {}
+spec: {}
+status:
+ readyToUse: true
+ volumeGroupSnapshotHandle: feae7cc9-d339-11ef-9750-6a7695ce0383
+ volumeSnapshotInfoList:
+ - snapshotHandle: feae7ce1-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a7e66d-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
+ - snapshotHandle: ff0f30a2-d339-11ef-9750-6a7695ce0383
+ volumeHandle: 72a89a79-d337-11ef-9750-6a7695ce0383
+ creationTime: 23
+ readyToUse: true
+ restoreSize: 12
diff --git a/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta1.yaml b/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta1.yaml
new file mode 100644
index 000000000..2c4621461
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta1.yaml
@@ -0,0 +1,7 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta1
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+
diff --git a/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta2.yaml b/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta2.yaml
new file mode 100644
index 000000000..c0e9e65f9
--- /dev/null
+++ b/pkg/webhook/testdata/v1beta2_to_v1beta1/no_annotation_no_status_v1beta2.yaml
@@ -0,0 +1,8 @@
+---
+apiVersion: groupsnapshot.storage.k8s.io/v1beta2
+kind: VolumeGroupSnapshotContent
+metadata:
+ name: new-groupsnapshot-demo
+spec: {}
+
+
diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go
new file mode 100644
index 000000000..4fc0fdcb4
--- /dev/null
+++ b/pkg/webhook/webhook.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net/http"
+
+ "k8s.io/klog/v2"
+)
+
+func StartServer(
+ ctx context.Context,
+ tlsConfig *tls.Config,
+ cw *CertWatcher,
+ port int,
+) error {
+ go func() {
+ klog.Info("Starting certificate watcher")
+ if err := cw.Start(ctx); err != nil {
+ klog.Errorf("certificate watcher error: %v", err)
+ }
+ }()
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/readyz", func(w http.ResponseWriter, req *http.Request) { w.Write([]byte("ok")) })
+ mux.HandleFunc("/convert", func(w http.ResponseWriter, req *http.Request) { serve(w, req, convertGroupSnapshotCRD) })
+
+ srv := &http.Server{
+ Handler: mux,
+ TLSConfig: tlsConfig,
+ }
+
+ // listener is always closed by srv.Serve
+ listener, err := tls.Listen("tcp", fmt.Sprintf(":%d", port), tlsConfig)
+ if err != nil {
+ return err
+ }
+
+ return srv.Serve(listener)
+}
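
The /convert endpoint speaks the standard apiextensions.k8s.io/v1 ConversionReview protocol. A hedged sketch of a client call against a locally running server; the port mirrors the test below, and the UID, payload, and InsecureSkipVerify setting are purely for local experimentation:

```go
package main

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	review := apiextv1.ConversionReview{
		TypeMeta: metav1.TypeMeta{APIVersion: "apiextensions.k8s.io/v1", Kind: "ConversionReview"},
		Request: &apiextv1.ConversionRequest{
			UID:               "illustrative-uid", // made up
			DesiredAPIVersion: "groupsnapshot.storage.k8s.io/v1beta2",
			Objects: []runtime.RawExtension{{Raw: []byte(`{
				"apiVersion": "groupsnapshot.storage.k8s.io/v1beta1",
				"kind": "VolumeGroupSnapshotContent",
				"metadata": {"name": "demo"},
				"spec": {}
			}`)}},
		},
	}

	body, err := json.Marshal(review)
	if err != nil {
		log.Fatal(err)
	}

	// Self-signed serving certificate, so skip verification locally only.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Post("https://127.0.0.1:30443/convert", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var out apiextv1.ConversionReview
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Response.Result.Status) // "Success" on a clean conversion
}
```
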
diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go
new file mode 100644
index 000000000..2165f4246
--- /dev/null
+++ b/pkg/webhook/webhook_test.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "math/big"
+ "net"
+ "os"
+ "path"
+ "testing"
+ "time"
+)
+
+func TestWebhookCertReload(t *testing.T) {
+ // Initialize test space
+ tmpDir := path.Join(t.TempDir(), "/webhook-cert-tests")
+ certFile := tmpDir + "/tls.crt"
+ keyFile := tmpDir + "/tls.key"
+ port := 30443
+ err := os.Mkdir(tmpDir, 0o777)
+ if err != nil && err != os.ErrExist {
+ t.Errorf("unexpected error occurred while creating tmp dir: %v", err)
+ }
+ defer func() {
+ err := os.RemoveAll(tmpDir)
+ if err != nil {
+ t.Errorf("unexpected error occurred while deleting certs: %v", err)
+ }
+ }()
+ err = generateTestCertKeyPair(t, certFile, keyFile)
+ if err != nil {
+ t.Errorf("unexpected error occurred while generating test certs: %v", err)
+ }
+
+ // Start test server
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ cw, err := NewCertWatcher(certFile, keyFile)
+ if err != nil {
+ t.Errorf("failed to initialize new cert watcher: %v", err)
+ }
+ tlsConfig := &tls.Config{
+ GetCertificate: cw.GetCertificate,
+ }
+ go func() {
+ err := StartServer(ctx,
+ tlsConfig,
+ cw,
+ port,
+ )
+ if err != nil {
+ panic(err)
+ }
+ }()
+ time.Sleep(250 * time.Millisecond) // Give some time for watcher to start
+
+ // TC: Original cert should not change with no file changes
+ originalCert, err := tlsConfig.GetCertificate(nil)
+ if err != nil {
+ t.Errorf("unexpected error occurred while getting cert: %v", err)
+ }
+ originalCertStr := string(originalCert.Certificate[0])
+ originalKey := originalCert.PrivateKey.(*rsa.PrivateKey)
+
+ newCert, err := tlsConfig.GetCertificate(nil) // get certificate again
+ if err != nil {
+ t.Errorf("unexpected error occurred while getting newcert: %v", err)
+ }
+ if string(newCert.Certificate[0]) != originalCertStr {
+ t.Error("new cert was updated when it should not have been")
+ }
+ newKey := newCert.PrivateKey.(*rsa.PrivateKey)
+ if !newKey.Equal(originalKey) {
+ t.Error("new key was updated when it should not have been")
+ }
+
+ // TC: Certificate should consistently change with a file change
+ for i := 0; i < 5; i++ {
+ // Generate new key/cert
+ err = generateTestCertKeyPair(t, certFile, keyFile)
+ if err != nil {
+ t.Errorf("unexpected error occurred while generating test certs: %v", err)
+ }
+
+ // Wait for certwatcher to update
+ time.Sleep(250 * time.Millisecond)
+ newCert, err = tlsConfig.GetCertificate(nil)
+ if err != nil {
+ t.Errorf("unexpected error occurred while getting newcert: %v", err)
+ }
+ if string(newCert.Certificate[0]) == originalCertStr {
+ t.Errorf("new cert was not updated")
+ }
+
+ newKey = newCert.PrivateKey.(*rsa.PrivateKey)
+ if newKey.Equal(originalKey) {
+ t.Error("new key was not updated")
+ }
+
+ originalCertStr = string(newCert.Certificate[0])
+ originalKey = newKey
+ }
+}
+
+// generateTestCertKeyPair generates a new random test key/crt and writes it to tmpDir
+// based on https://golang.org/src/crypto/tls/generate_cert.go
+func generateTestCertKeyPair(t *testing.T, certPath, keyPath string) error {
+ notBefore := time.Now()
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ return fmt.Errorf("Failed to generate serial number: %v", err)
+ }
+
+ var priv interface{}
+ priv, err = rsa.GenerateKey(rand.Reader, 4096)
+ if err != nil {
+ return fmt.Errorf("Failed to generate key: %v", err)
+ }
+ keyUsage := x509.KeyUsageDigitalSignature
+ if _, isRSA := priv.(*rsa.PrivateKey); isRSA {
+ keyUsage |= x509.KeyUsageKeyEncipherment
+ }
+ randomOrganizationStr := time.Now().String()
+ template := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ Organization: []string{randomOrganizationStr},
+ },
+ NotBefore: notBefore,
+ NotAfter: time.Now().Add(1 * time.Hour),
+
+ KeyUsage: keyUsage,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
+ DNSNames: []string{"127.0.0.1"},
+ }
+
+ rk := priv.(*rsa.PrivateKey)
+ derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &rk.PublicKey, priv)
+ if err != nil {
+ return fmt.Errorf("Failed to create certificate: %v", err)
+ }
+
+ certOut, err := os.Create(certPath)
+ if err != nil {
+ return fmt.Errorf("Failed to open tls.crt for writing: %v", err)
+ }
+ if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+ return fmt.Errorf("Failed to write data to tls.crt: %v", err)
+ }
+ if err := certOut.Close(); err != nil {
+ return fmt.Errorf("Error closing tls.crt: %v", err)
+ }
+ fmt.Printf("wrote new cert: %s\n", certPath)
+
+ keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ return fmt.Errorf("Failed to open tls.key for writing: %v", err)
+ }
+ privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
+ if err != nil {
+ return fmt.Errorf("Unable to marshal private key: %v", err)
+ }
+ if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
+ return fmt.Errorf("Failed to write data to tls.key: %v", err)
+ }
+ if err := keyOut.Close(); err != nil {
+ return fmt.Errorf("Error closing tls.key: %v", err)
+ }
+ fmt.Printf("wrote new key: %s\n", keyPath)
+
+ return nil
+}
diff --git a/release-tools/.github/workflows/codespell.yml b/release-tools/.github/workflows/codespell.yml
index e74edcef5..9e1ca1033 100644
--- a/release-tools/.github/workflows/codespell.yml
+++ b/release-tools/.github/workflows/codespell.yml
@@ -8,7 +8,7 @@ jobs:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true
diff --git a/release-tools/.github/workflows/trivy.yaml b/release-tools/.github/workflows/trivy.yaml
index 472984780..70c6a9d1f 100644
--- a/release-tools/.github/workflows/trivy.yaml
+++ b/release-tools/.github/workflows/trivy.yaml
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Get Go version
id: go-version
diff --git a/release-tools/KUBERNETES_CSI_OWNERS_ALIASES b/release-tools/KUBERNETES_CSI_OWNERS_ALIASES
index 6e46cf290..f7bc70181 100644
--- a/release-tools/KUBERNETES_CSI_OWNERS_ALIASES
+++ b/release-tools/KUBERNETES_CSI_OWNERS_ALIASES
@@ -19,7 +19,6 @@ aliases:
kubernetes-csi-reviewers:
- andyzhangx
- carlory
- - chrishenzie
- ggriffiths
- gnufied
- humblec
diff --git a/release-tools/cloudbuild.yaml b/release-tools/cloudbuild.yaml
index 1693df84b..a6f86fa3a 100644
--- a/release-tools/cloudbuild.yaml
+++ b/release-tools/cloudbuild.yaml
@@ -26,7 +26,7 @@ steps:
# The image must contain bash and curl. Ideally it should also contain
# the desired version of Go (currently defined in release-tools/prow.sh),
# but that just speeds up the build and is not required.
- - name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20240718-5ef92b5c36'
+ - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20240718-5ef92b5c36'
entrypoint: ./.cloudbuild.sh
env:
- GIT_TAG=${_GIT_TAG}
diff --git a/release-tools/prow.sh b/release-tools/prow.sh
index b2628d0ec..b4e347304 100755
--- a/release-tools/prow.sh
+++ b/release-tools/prow.sh
@@ -86,7 +86,7 @@ configvar CSI_PROW_BUILD_PLATFORMS "linux amd64 amd64; linux ppc64le ppc64le -pp
# which is disabled with GOFLAGS=-mod=vendor).
configvar GOFLAGS_VENDOR "$( [ -d vendor ] && echo '-mod=vendor' )" "Go flags for using the vendor directory"
-configvar CSI_PROW_GO_VERSION_BUILD "1.24.2" "Go version for building the component" # depends on component's source code
+configvar CSI_PROW_GO_VERSION_BUILD "1.24.6" "Go version for building the component" # depends on component's source code
configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
@@ -199,7 +199,7 @@ kindest/node:v1.26.15@sha256:c79602a44b4056d7e48dc20f7504350f1e87530fe953428b792
# If the deployment script is called with CSI_PROW_TEST_DRIVER= as
# environment variable, then it must write a suitable test driver configuration
# into that file in addition to installing the driver.
-configvar CSI_PROW_DRIVER_VERSION "v1.15.0" "CSI driver version"
+configvar CSI_PROW_DRIVER_VERSION "v1.17.0" "CSI driver version"
configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo"
configvar CSI_PROW_DEPLOYMENT "" "deployment"
configvar CSI_PROW_DEPLOYMENT_SUFFIX "" "additional suffix in kubernetes-x.yy[suffix].yaml files"
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..13c50892b
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.3.2
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+ (once every three weeks) and the council decision included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/cel.dev/expr/LICENSE b/vendor/cel.dev/expr/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cel.dev/expr/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthias Blume | Google | cel-spec |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..85ac9ff61
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,74 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.39.1",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20241220-5e258e33.bcr.1",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "googleapis-cc",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-java",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-go",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "27.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.17",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.53.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "7.0.2",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.22.0")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..42d67f87c
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,71 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+ tooling to reason about the system as a whole.
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation-free, and is not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications, and allows for
+ extensibility via its context, through which the embedding software
+ provides functions and data.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+ * The library itself and accompanying tooling should be easy to adopt by
+ teams that seek to integrate CEL into their platforms.
+
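+Point 2 above can be made concrete with a small sketch: the embedding
+application registers functions on the environment at construction time.
+The snippet below uses the Go implementation,
+[cel-go](https://github.com/google/cel-go) (a separate library from this
+proto package), and the `greet` function is purely illustrative:
+
+``` go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+func main() {
+	// The embedder extends the environment with a custom function.
+	env, err := cel.NewEnv(
+		cel.Function("greet",
+			cel.Overload("greet_string", []*cel.Type{cel.StringType}, cel.StringType,
+				cel.UnaryBinding(func(arg ref.Val) ref.Val {
+					return types.String("Hello, " + string(arg.(types.String)) + "!")
+				}),
+			),
+		),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Expressions may now call the registered function.
+	ast, iss := env.Compile(`greet("world")`)
+	if iss.Err() != nil {
+		log.Fatal(iss.Err())
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		log.Fatal(err)
+	}
+	out, _, err := prg.Eval(cel.NoVars())
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(out) // Hello, world!
+}
+```
+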
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. Its
+ syntax is similar to that of expressions in C/C++/Java/JavaScript.
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+ Most use cases will use `attribute_context.proto`.
+* An evaluator library that takes the binary format in the context and
+ produces a result, usually a Boolean.
+
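+End to end, the pipeline above looks roughly like the following. This is
+again a hedged sketch using cel-go rather than this proto package; the
+variable declarations and expression are illustrative only:
+
+``` go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/google/cel-go/cel"
+)
+
+func main() {
+	// Context: declare the typed variables the expression may reference.
+	env, err := cel.NewEnv(
+		cel.Variable("account", cel.MapType(cel.StringType, cel.DoubleType)),
+		cel.Variable("transaction", cel.MapType(cel.StringType, cel.DoubleType)),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Compiler: parse and type-check the textual expression into an AST.
+	ast, iss := env.Compile(`account.balance >= transaction.withdrawal`)
+	if iss.Err() != nil {
+		log.Fatal(iss.Err())
+	}
+
+	// Evaluator: plan a program and run it against concrete data.
+	prg, err := env.Program(ast)
+	if err != nil {
+		log.Fatal(err)
+	}
+	out, _, err := prg.Eval(map[string]interface{}{
+		"account":     map[string]float64{"balance": 500},
+		"transaction": map[string]float64{"withdrawal": 300},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(out) // true
+}
+```
+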
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team maintains canonical protocol buffers for ASTs and
+will keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
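+A hedged sketch of that serialization step, assuming `ast` is a type-checked
+`*cel.Ast` such as the one produced in the earlier example:
+
+``` go
+package main
+
+import (
+	"github.com/google/cel-go/cel"
+	"google.golang.org/protobuf/proto"
+)
+
+// persistAst converts a type-checked AST to its CheckedExpr proto and
+// marshals it so it can be stored or sent to another process.
+func persistAst(ast *cel.Ast) ([]byte, error) {
+	checked, err := cel.AstToCheckedExpr(ast)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Marshal(checked)
+}
+```
+
+The reverse direction is `proto.Unmarshal` into a `CheckedExpr` followed by
+`cel.CheckedExprToAst`; because the proto forms above are kept
+wire-compatible, bytes written against one can be read with the other.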
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc., yet. See comment at the end.
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..e3e533a04
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.3.2'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..a7aae0900
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,487 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.3
+// protoc v5.27.1
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExprValue) GetKind() isExprValue_Kind {
+ if x != nil {
+ return x.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Value); ok {
+ return x.Value
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Error); ok {
+ return x.Error
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type Status struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
+ 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
+ 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
+ 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
+ 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
+ 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
+ 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_cel_expr_eval_proto_goTypes = []any{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*Status)(nil), // 3: cel.expr.Status
+ (*UnknownSet)(nil), // 4: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
+ (*Value)(nil), // 6: cel.expr.Value
+ (*anypb.Any)(nil), // 7: google.protobuf.Any
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
+ 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in "${files[@]}";
+do
+ dst=$(echo "$src" | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+ echo "copying $dst"
+ cp "$src" "$dst"
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/NYTimes/gziphandler/.gitignore b/vendor/github.com/NYTimes/gziphandler/.gitignore
new file mode 100644
index 000000000..1377554eb
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/.gitignore
@@ -0,0 +1 @@
+*.swp
diff --git a/vendor/github.com/NYTimes/gziphandler/.travis.yml b/vendor/github.com/NYTimes/gziphandler/.travis.yml
new file mode 100644
index 000000000..94dfae362
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go:
+ - 1.x
+ - tip
+env:
+ - GO111MODULE=on
+install:
+ - go mod download
+script:
+ - go test -race -v
diff --git a/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..cdbca194c
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+---
+layout: code-of-conduct
+version: v1.0
+---
+
+This code of conduct outlines our expectations for participants within the **NYTimes/gziphandler** community, as well as steps to reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all and expect our code of conduct to be honored. Anyone who violates this code of conduct may be banned from the community.
+
+Our open source community strives to:
+
+* **Be friendly and patient.**
+* **Be welcoming**: We strive to be a community that welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, colour, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability.
+* **Be considerate**: Your work will be used by other people, and you in turn will depend on the work of others. Any decision you take will affect users and colleagues, and you should take those consequences into account when making decisions. Remember that we're a world-wide community, so you might not be communicating in someone else's primary language.
+* **Be respectful**: Not all of us will agree all the time, but disagreement is no excuse for poor behavior and poor manners. We might all experience some frustration now and then, but we cannot allow that frustration to turn into a personal attack. It’s important to remember that a community where people feel uncomfortable or threatened is not a productive one.
+* **Be careful in the words that we choose**: we are a community of professionals, and we conduct ourselves professionally. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behavior aren't acceptable.
+* **Try to understand why we disagree**: Disagreements, both social and technical, happen all the time. It is important that we resolve disagreements and differing views constructively. Remember that we’re different. The strength of our community comes from its diversity, people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Don’t forget that it is human to err and blaming each other doesn’t get us anywhere. Instead, focus on helping to resolve issues and learning from mistakes.
+
+## Definitions
+
+Harassment includes, but is not limited to:
+
+- Offensive comments related to gender, gender identity and expression, sexual orientation, disability, mental illness, neuro(a)typicality, physical appearance, body size, race, age, regional discrimination, political or religious affiliation
+- Unwelcome comments regarding a person’s lifestyle choices and practices, including those related to food, health, parenting, drugs, and employment
+- Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a person's gender identity. You must address people by the name they give you when not addressing them by their username or handle
+- Physical contact and simulated physical contact (eg, textual descriptions like “*hug*” or “*backrub*”) without consent or after a request to stop
+- Threats of violence, both physical and psychological
+- Incitement of violence towards any individual, including encouraging a person to commit suicide or to engage in self-harm
+- Deliberate intimidation
+- Stalking or following
+- Harassing photography or recording, including logging online activity for harassment purposes
+- Sustained disruption of discussion
+- Unwelcome sexual attention, including gratuitous or off-topic sexual images or behaviour
+- Pattern of inappropriate social contact, such as requesting/assuming inappropriate levels of intimacy with others
+- Continued one-on-one communication after requests to cease
+- Deliberate “outing” of any aspect of a person’s identity without their consent except as necessary to protect others from intentional abuse
+- Publication of non-harassing private communication
+
+Our open source community prioritizes marginalized people’s safety over privileged people’s comfort. We will not act on complaints regarding:
+
+- ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’
+- Reasonable communication of boundaries, such as “leave me alone,” “go away,” or “I’m not discussing this with you”
+- Refusal to explain or debate social justice concepts
+- Communicating in a ‘tone’ you don’t find congenial
+- Criticizing racist, sexist, cissexist, or otherwise oppressive behavior or assumptions
+
+
+### Diversity Statement
+
+We encourage everyone to participate and are committed to building a community for all. Although we will fail at times, we seek to treat everyone both as fairly and equally as possible. Whenever a participant has made a mistake, we expect them to take responsibility for it. If someone has been harmed or offended, it is our responsibility to listen carefully and respectfully, and do our best to right the wrong.
+
+Although this list cannot be exhaustive, we explicitly honor diversity in age, gender, gender identity or expression, culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, and technical ability. We will not tolerate discrimination based on any of the protected
+characteristics above, including against participants with disabilities.
+
+### Reporting Issues
+
+If you experience or witness unacceptable behavior—or have any other concerns—please report it by contacting us via **code@nytimes.com**. All reports will be handled with discretion. In your report please include:
+
+- Your contact information.
+- Names (real, nicknames, or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well.
+- Your account of what occurred, and whether you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public IRC logger), please include a link.
+- Any additional information that may be helpful.
+
+After filing a report, a representative will contact you personally, review the incident, follow up with any additional questions, and make a decision as to how to respond. If the person who is harassing you is part of the response team, they will recuse themselves from handling your incident. If the complaint originates from a member of the response team, it will be handled by a different member of the response team. We will respect confidentiality requests for the purpose of protecting victims of abuse.
+
+### Attribution & Acknowledgements
+
+We all stand on the shoulders of giants across many open source communities. We'd like to thank the communities and projects that established code of conducts and diversity statements as our inspiration:
+
+* [Django](https://www.djangoproject.com/conduct/reporting/)
+* [Python](https://www.python.org/community/diversity/)
+* [Ubuntu](http://www.ubuntu.com/about/about-ubuntu/conduct)
+* [Contributor Covenant](http://contributor-covenant.org/)
+* [Geek Feminism](http://geekfeminism.org/about/code-of-conduct/)
+* [Citizen Code of Conduct](http://citizencodeofconduct.org/)
+
+This Code of Conduct was based on https://github.com/todogroup/opencodeofconduct
diff --git a/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md
new file mode 100644
index 000000000..b89a9eb4f
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/CONTRIBUTING.md
@@ -0,0 +1,30 @@
+# Contributing to NYTimes/gziphandler
+
+This is an open source project started by a handful of developers at The New York Times and open to the entire Go community.
+
+We really appreciate your help!
+
+## Filing issues
+
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+## Contributing code
+
+Before submitting changes, please follow these guidelines:
+
+1. Check the open issues and pull requests for existing discussions.
+2. Open an issue to discuss a new feature.
+3. Write tests.
+4. Make sure code follows the ['Go Code Review Comments'](https://github.com/golang/go/wiki/CodeReviewComments).
+5. Make sure your changes pass `go test`.
+6. Make sure the entire test suite passes locally and on Travis CI.
+7. Open a Pull Request.
+8. [Squash your commits](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) after receiving feedback and add a [great commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+
+Unless otherwise noted, the gziphandler source files are distributed under the Apache 2.0-style license found in the LICENSE file.
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE b/vendor/github.com/NYTimes/gziphandler/LICENSE
new file mode 100644
index 000000000..df6192d36
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2017 The New York Times Company
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md
new file mode 100644
index 000000000..6259acaca
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/README.md
@@ -0,0 +1,56 @@
+Gzip Handler
+============
+
+This is a tiny Go package which wraps HTTP handlers to transparently gzip the
+response body, for clients which support it. Although it's usually simpler to
+leave that to a reverse proxy (like nginx or Varnish), this package is useful
+when that's undesirable.
+
+## Install
+```bash
+go get -u github.com/NYTimes/gziphandler
+```
+
+## Usage
+
+Call `GzipHandler` with any handler (an object which implements the
+`http.Handler` interface), and it'll return a new handler which gzips the
+response. For example:
+
+```go
+package main
+
+import (
+ "io"
+ "net/http"
+ "github.com/NYTimes/gziphandler"
+)
+
+func main() {
+ withoutGz := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ io.WriteString(w, "Hello, World")
+ })
+
+ withGz := gziphandler.GzipHandler(withoutGz)
+
+ http.Handle("/", withGz)
+ http.ListenAndServe("0.0.0.0:8000", nil)
+}
+```
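+
+The handler can also be tuned. Below is a minimal sketch using the package's
+`GzipHandlerWithOpts`, `CompressionLevel`, and `MinSize` options, reusing
+`withoutGz` from the example above (the values are illustrative):
+
+```go
+// Compress at best speed, but only for responses of at least 2 KB.
+wrapper, err := gziphandler.GzipHandlerWithOpts(
+	gziphandler.CompressionLevel(gzip.BestSpeed), // import "compress/gzip"
+	gziphandler.MinSize(2048),
+)
+if err != nil {
+	log.Fatal(err) // fails only for an invalid option, e.g. a bad level
+}
+http.Handle("/", wrapper(withoutGz))
+```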
+
+
+## Documentation
+
+The docs can be found at [godoc.org][docs], as usual.
+
+
+## License
+
+[Apache 2.0][license].
+
+
+
+
+[docs]: https://godoc.org/github.com/NYTimes/gziphandler
+[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go
new file mode 100644
index 000000000..c112bbdf8
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/gzip.go
@@ -0,0 +1,532 @@
+package gziphandler // import "github.com/NYTimes/gziphandler"
+
+import (
+ "bufio"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "mime"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const (
+ vary = "Vary"
+ acceptEncoding = "Accept-Encoding"
+ contentEncoding = "Content-Encoding"
+ contentType = "Content-Type"
+ contentLength = "Content-Length"
+)
+
+type codings map[string]float64
+
+const (
+ // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
+ // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
+ // The examples seem to indicate that it is.
+ DefaultQValue = 1.0
+
+	// DefaultMinSize is the default minimum response size for which we enable gzip compression.
+ // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
+ // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
+ // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
+ DefaultMinSize = 1400
+)
+
+// gzipWriterPools stores a sync.Pool for each compression level for reuse of
+// gzip.Writers. Use poolIndex to convert a compression level to an index into
+// gzipWriterPools.
+var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool
+
+func init() {
+ for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ {
+ addLevelPool(i)
+ }
+ addLevelPool(gzip.DefaultCompression)
+}
+
+// poolIndex maps a compression level to its index into gzipWriterPools. It
+// assumes that level is a valid gzip compression level.
+func poolIndex(level int) int {
+ // gzip.DefaultCompression == -1, so we need to treat it special.
+ if level == gzip.DefaultCompression {
+ return gzip.BestCompression - gzip.BestSpeed + 1
+ }
+ return level - gzip.BestSpeed
+}
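+
+// For example, with the standard library's constants (gzip.BestSpeed == 1,
+// gzip.BestCompression == 9, gzip.DefaultCompression == -1):
+//
+//	poolIndex(gzip.BestSpeed)          == 0
+//	poolIndex(gzip.BestCompression)    == 8
+//	poolIndex(gzip.DefaultCompression) == 9 // the extra slot in gzipWriterPools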
+
+func addLevelPool(level int) {
+ gzipWriterPools[poolIndex(level)] = &sync.Pool{
+ New: func() interface{} {
+ // NewWriterLevel only returns error on a bad level, we are guaranteeing
+ // that this will be a valid level so it is okay to ignore the returned
+ // error.
+ w, _ := gzip.NewWriterLevel(nil, level)
+ return w
+ },
+ }
+}
+
+// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
+// bytes before writing them to the underlying response. This doesn't close the
+// writers, so don't forget to do that.
+// It can be configured to skip responses smaller than minSize.
+type GzipResponseWriter struct {
+ http.ResponseWriter
+ index int // Index for gzipWriterPools.
+ gw *gzip.Writer
+
+ code int // Saves the WriteHeader value.
+
+	minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
+ buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
+	ignore bool // If true, then we immediately pass writes through to the underlying ResponseWriter.
+
+ contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
+}
+
+type GzipResponseWriterWithCloseNotify struct {
+ *GzipResponseWriter
+}
+
+func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
+ return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Write appends data to the gzip writer.
+func (w *GzipResponseWriter) Write(b []byte) (int, error) {
+ // GZIP responseWriter is initialized. Use the GZIP responseWriter.
+ if w.gw != nil {
+ return w.gw.Write(b)
+ }
+
+ // If we have already decided not to use GZIP, immediately passthrough.
+ if w.ignore {
+ return w.ResponseWriter.Write(b)
+ }
+
+ // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
+ // On the first write, w.buf changes from nil to a valid slice
+ w.buf = append(w.buf, b...)
+
+ var (
+ cl, _ = strconv.Atoi(w.Header().Get(contentLength))
+ ct = w.Header().Get(contentType)
+ ce = w.Header().Get(contentEncoding)
+ )
+	// Only continue if no Content-Encoding was already set and neither a known Content-Length nor Content-Type rules out compression.
+ if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
+ // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
+ if len(w.buf) < w.minSize && cl == 0 {
+ return len(b), nil
+ }
+ // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
+ if cl >= w.minSize || len(w.buf) >= w.minSize {
+ // If a Content-Type wasn't specified, infer it from the current buffer.
+ if ct == "" {
+ ct = http.DetectContentType(w.buf)
+ w.Header().Set(contentType, ct)
+ }
+ // If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
+ if handleContentType(w.contentTypes, ct) {
+ if err := w.startGzip(); err != nil {
+ return 0, err
+ }
+ return len(b), nil
+ }
+ }
+ }
+ // If we got here, we should not GZIP this response.
+ if err := w.startPlain(); err != nil {
+ return 0, err
+ }
+ return len(b), nil
+}
+
+// startGzip initializes a GZIP writer and writes the buffer.
+func (w *GzipResponseWriter) startGzip() error {
+ // Set the GZIP header.
+ w.Header().Set(contentEncoding, "gzip")
+
+ // if the Content-Length is already set, then calls to Write on gzip
+	// will fail to set the Content-Length header since it's already set
+ // See: https://github.com/golang/go/issues/14975.
+ w.Header().Del(contentLength)
+
+ // Write the header to gzip response.
+ if w.code != 0 {
+ w.ResponseWriter.WriteHeader(w.code)
+ // Ensure that no other WriteHeader's happen
+ w.code = 0
+ }
+
+ // Initialize and flush the buffer into the gzip response if there are any bytes.
+ // If there aren't any, we shouldn't initialize it yet because on Close it will
+ // write the gzip header even if nothing was ever written.
+ if len(w.buf) > 0 {
+ // Initialize the GZIP response.
+ w.init()
+ n, err := w.gw.Write(w.buf)
+
+ // This should never happen (per io.Writer docs), but if the write didn't
+ // accept the entire buffer but returned no specific error, we have no clue
+ // what's going on, so abort just to be safe.
+ if err == nil && n < len(w.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+ }
+ return nil
+}
+
+// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
+func (w *GzipResponseWriter) startPlain() error {
+ if w.code != 0 {
+ w.ResponseWriter.WriteHeader(w.code)
+ // Ensure that no other WriteHeader's happen
+ w.code = 0
+ }
+ w.ignore = true
+ // If Write was never called then don't call Write on the underlying ResponseWriter.
+ if w.buf == nil {
+ return nil
+ }
+	n, err := w.ResponseWriter.Write(w.buf)
+	// This should never happen (per io.Writer docs), but if the write didn't
+	// accept the entire buffer but returned no specific error, we have no clue
+	// what's going on, so abort just to be safe. Check the length before
+	// releasing the buffer, otherwise the comparison is always against zero.
+	if err == nil && n < len(w.buf) {
+		err = io.ErrShortWrite
+	}
+	w.buf = nil
+	return err
+}
+
+// WriteHeader just saves the response code until close or until the GZIP writer effectively writes.
+func (w *GzipResponseWriter) WriteHeader(code int) {
+ if w.code == 0 {
+ w.code = code
+ }
+}
+
+// init grabs a new gzip writer from the gzipWriterPool and resets it to write
+// to the underlying ResponseWriter.
+func (w *GzipResponseWriter) init() {
+ // Bytes written during ServeHTTP are redirected to this gzip writer
+ // before being written to the underlying response.
+ gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
+ gzw.Reset(w.ResponseWriter)
+ w.gw = gzw
+}
+
+// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
+func (w *GzipResponseWriter) Close() error {
+ if w.ignore {
+ return nil
+ }
+
+ if w.gw == nil {
+ // GZIP not triggered yet, write out regular response.
+ err := w.startPlain()
+ // Returns the error if any at write.
+ if err != nil {
+ err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
+ }
+ return err
+ }
+
+ err := w.gw.Close()
+ gzipWriterPools[w.index].Put(w.gw)
+ w.gw = nil
+ return err
+}
+
+// Flush flushes the underlying *gzip.Writer and then the underlying
+// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
+// an http.Flusher.
+func (w *GzipResponseWriter) Flush() {
+ if w.gw == nil && !w.ignore {
+ // Only flush once startGzip or startPlain has been called.
+ //
+ // Flush is thus a no-op until we're certain whether a plain
+ // or gzipped response will be served.
+ return
+ }
+
+ if w.gw != nil {
+ w.gw.Flush()
+ }
+
+ if fw, ok := w.ResponseWriter.(http.Flusher); ok {
+ fw.Flush()
+ }
+}
+
+// Hijack implements http.Hijacker. If the underlying ResponseWriter is a
+// Hijacker, its Hijack method is called. Otherwise an error is returned.
+func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := w.ResponseWriter.(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, fmt.Errorf("http.Hijacker interface is not supported")
+}
+
+// verify Hijacker interface implementation
+var _ http.Hijacker = &GzipResponseWriter{}
+
+// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in
+// an error case it panics rather than returning an error.
+func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
+ wrap, err := NewGzipLevelHandler(level)
+ if err != nil {
+ panic(err)
+ }
+ return wrap
+}
+
+// NewGzipLevelHandler returns a wrapper function (often known as middleware)
+// which can be used to wrap an HTTP handler to transparently gzip the response
+// body if the client supports it (via the Accept-Encoding header). Responses will
+// be encoded at the given gzip compression level. An error will be returned only
+// if an invalid gzip compression level is given, so if one can ensure the level
+// is valid, the returned error can be safely ignored.
+func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
+ return NewGzipLevelAndMinSize(level, DefaultMinSize)
+}
+
+// NewGzipLevelAndMinSize behaves like NewGzipLevelHandler except it lets the
+// caller specify the minimum size before compression is applied.
+func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
+ return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
+}
+
+func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
+ c := &config{
+ level: gzip.DefaultCompression,
+ minSize: DefaultMinSize,
+ }
+
+ for _, o := range opts {
+ o(c)
+ }
+
+ if err := c.validate(); err != nil {
+ return nil, err
+ }
+
+ return func(h http.Handler) http.Handler {
+ index := poolIndex(c.level)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add(vary, acceptEncoding)
+ if acceptsGzip(r) {
+ gw := &GzipResponseWriter{
+ ResponseWriter: w,
+ index: index,
+ minSize: c.minSize,
+ contentTypes: c.contentTypes,
+ }
+ defer gw.Close()
+
+ if _, ok := w.(http.CloseNotifier); ok {
+ gwcn := GzipResponseWriterWithCloseNotify{gw}
+ h.ServeHTTP(gwcn, r)
+ } else {
+ h.ServeHTTP(gw, r)
+ }
+
+ } else {
+ h.ServeHTTP(w, r)
+ }
+ })
+ }, nil
+}
+
+// Parsed representation of one of the inputs to ContentTypes.
+// See https://golang.org/pkg/mime/#ParseMediaType
+type parsedContentType struct {
+ mediaType string
+ params map[string]string
+}
+
+// equals returns whether this content type matches another content type.
+func (pct parsedContentType) equals(mediaType string, params map[string]string) bool {
+ if pct.mediaType != mediaType {
+ return false
+ }
+ // if pct has no params, don't care about other's params
+ if len(pct.params) == 0 {
+ return true
+ }
+
+ // if pct has any params, they must be identical to other's.
+ if len(pct.params) != len(params) {
+ return false
+ }
+ for k, v := range pct.params {
+ if w, ok := params[k]; !ok || v != w {
+ return false
+ }
+ }
+ return true
+}
+
+// Used for functional configuration.
+type config struct {
+ minSize int
+ level int
+ contentTypes []parsedContentType
+}
+
+func (c *config) validate() error {
+ if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
+ return fmt.Errorf("invalid compression level requested: %d", c.level)
+ }
+
+ if c.minSize < 0 {
+ return fmt.Errorf("minimum size must be more than zero")
+ }
+
+ return nil
+}
+
+type option func(c *config)
+
+func MinSize(size int) option {
+ return func(c *config) {
+ c.minSize = size
+ }
+}
+
+func CompressionLevel(level int) option {
+ return func(c *config) {
+ c.level = level
+ }
+}
+
+// ContentTypes specifies a list of content types to compare
+// the Content-Type header to before compressing. If none
+// match, the response will be returned as-is.
+//
+// Content types are compared in a case-insensitive, whitespace-ignored
+// manner.
+//
+// A MIME type without any other directive will match a content type
+// that has the same MIME type, regardless of that content type's other
+// directives. I.e., "text/html" will match both "text/html" and
+// "text/html; charset=utf-8".
+//
+// A MIME type with any other directive will only match a content type
+// that has the same MIME type and other directives. I.e.,
+// "text/html; charset=utf-8" will only match "text/html; charset=utf-8".
+//
+// By default, responses are gzipped regardless of
+// Content-Type.
+func ContentTypes(types []string) option {
+ return func(c *config) {
+ c.contentTypes = []parsedContentType{}
+ for _, v := range types {
+ mediaType, params, err := mime.ParseMediaType(v)
+ if err == nil {
+ c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params})
+ }
+ }
+ }
+}
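+
+// A sketch of how ContentTypes composes with GzipHandlerWithOpts; the wrapped
+// handler (apiHandler) is hypothetical:
+//
+//	wrapper, err := GzipHandlerWithOpts(ContentTypes([]string{
+//		"text/html",        // also matches "text/html; charset=utf-8"
+//		"application/json", // parameters omitted, so any parameters match
+//	}))
+//	if err == nil {
+//		http.Handle("/api", wrapper(apiHandler))
+//	}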
+
+// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
+// the client supports it (via the Accept-Encoding header). This will compress at
+// the default compression level.
+func GzipHandler(h http.Handler) http.Handler {
+ wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression)
+ return wrapper(h)
+}
+
+// acceptsGzip returns true if the given HTTP request indicates that it will
+// accept a gzipped response.
+func acceptsGzip(r *http.Request) bool {
+ acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding))
+ return acceptedEncodings["gzip"] > 0.0
+}
+
+// returns true if we've been configured to compress the specific content type.
+func handleContentType(contentTypes []parsedContentType, ct string) bool {
+ // If contentTypes is empty we handle all content types.
+ if len(contentTypes) == 0 {
+ return true
+ }
+
+ mediaType, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return false
+ }
+
+ for _, c := range contentTypes {
+ if c.equals(mediaType, params) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
+// appear in an Accept-Encoding header. It returns a map of content-codings to
+// quality values, and an error containing the errors encountered. It's probably
+// safe to ignore those, because silently ignoring errors is how the internet
+// works.
+//
+// See: http://tools.ietf.org/html/rfc2616#section-14.3.
+func parseEncodings(s string) (codings, error) {
+ c := make(codings)
+ var e []string
+
+ for _, ss := range strings.Split(s, ",") {
+ coding, qvalue, err := parseCoding(ss)
+
+ if err != nil {
+ e = append(e, err.Error())
+ } else {
+ c[coding] = qvalue
+ }
+ }
+
+ // TODO (adammck): Use a proper multi-error struct, so the individual errors
+ // can be extracted if anyone cares.
+ if len(e) > 0 {
+ return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", "))
+ }
+
+ return c, nil
+}
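+
+// For example (illustrative): parseEncodings("gzip;q=0.8, identity") returns
+// codings{"gzip": 0.8, "identity": 1} and a nil error; a coding without an
+// explicit qvalue defaults to DefaultQValue.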
+
+// parseCoding parses a single coding (content-coding with an optional qvalue),
+// as might appear in an Accept-Encoding header. It attempts to forgive minor
+// formatting errors.
+func parseCoding(s string) (coding string, qvalue float64, err error) {
+ for n, part := range strings.Split(s, ";") {
+ part = strings.TrimSpace(part)
+ qvalue = DefaultQValue
+
+ if n == 0 {
+ coding = strings.ToLower(part)
+ } else if strings.HasPrefix(part, "q=") {
+ qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64)
+
+ if qvalue < 0.0 {
+ qvalue = 0.0
+ } else if qvalue > 1.0 {
+ qvalue = 1.0
+ }
+ }
+ }
+
+ if coding == "" {
+ err = fmt.Errorf("empty content-coding")
+ }
+
+ return
+}
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go
new file mode 100644
index 000000000..fa9665b7e
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go
@@ -0,0 +1,43 @@
+// +build go1.8
+
+package gziphandler
+
+import "net/http"
+
+// Push initiates an HTTP/2 server push.
+// Push returns ErrNotSupported if the client has disabled push or if push
+// is not supported on the underlying connection.
+func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
+ pusher, ok := w.ResponseWriter.(http.Pusher)
+ if ok && pusher != nil {
+ return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
+ }
+ return http.ErrNotSupported
+}
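+
+// Illustrative push from a handler wrapped by GzipHandler (the asset path is
+// hypothetical):
+//
+//	if pusher, ok := w.(http.Pusher); ok {
+//		_ = pusher.Push("/static/app.css", nil) // Accept-Encoding: gzip is set below
+//	}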
+
+// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
+func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
+
+ if opts == nil {
+ opts = &http.PushOptions{
+ Header: http.Header{
+ acceptEncoding: []string{"gzip"},
+ },
+ }
+ return opts
+ }
+
+ if opts.Header == nil {
+ opts.Header = http.Header{
+ acceptEncoding: []string{"gzip"},
+ }
+ return opts
+ }
+
+ if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
+ opts.Header.Add(acceptEncoding, "gzip")
+ return opts
+ }
+
+ return opts
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
new file mode 100644
index 000000000..38ea34ff5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
@@ -0,0 +1,18 @@
+### Go template
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+
+# Go workspace file
+go.work
+
+# No Goland stuff in this repo
+.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
new file mode 100644
index 000000000..a22292eb5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
new file mode 100644
index 000000000..03e5b83eb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/README.md
@@ -0,0 +1,54 @@
+[Go Report Card](https://goreportcard.com/report/github.com/antlr4-go/antlr) ·
+[Go Reference](https://pkg.go.dev/github.com/antlr4-go/antlr) ·
+[Latest Release](https://github.com/antlr4-go/antlr/releases/latest) ·
+[Commit Activity](https://github.com/antlr4-go/antlr/commit-activity) ·
+[License: BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)
+# ANTLR4 Go Runtime Module Repo
+
+IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
+
+ - Do not submit PRs or any change requests to this repo
+ - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
+ - This repo contains the Go runtime that your generated projects should import
+
+## Introduction
+
+This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
+at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
+the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
+causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by `go get` as:
+
+```
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was taken to create a separate org and repo to hold the official Go runtime for ANTLR,
+from which users can expect `go get` to behave as expected.
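+
+With this module repo, a plain `go get` resolves to the expected release tag:
+
+```sh
+go get github.com/antlr4-go/antlr/v4
+```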
+
+
+# Documentation
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000..3bb4fd7c4
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. it uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list the release tag such as @4.12.0 - this was confusing, to say the least.
+
+As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
+(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo].
+Though we highly recommend that you use go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR based recognizer in Go:
+
+ .
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a separate package of your own choosing.
+
+From the command line at the root of your source package (location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go
new file mode 100644
index 000000000..cdeefed24
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term
+// “Augmented Recursive Transition Network” is generally used; some descriptions of
+// “[Recursive Transition Network]” also exist.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+
+	// DecisionToState holds the decision points for all rules, sub-rules, optional
+	// blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must
+	// track them so we can go back later and build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+	// states is a list of all states in the ATN, ordered by state number.
+ states []ATNState
+
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
+}
+
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s and staying within the same rule. [TokenEpsilon] is in the set if we reach the
+// end of the rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ iset := s.GetNextTokenWithinRule()
+ if iset == nil {
+ iset = a.NextTokensInContext(s, nil)
+ iset.readOnly = true
+ s.SetNextTokenWithinRule(iset)
+ }
+ return iset
+}
+
+// NextTokens computes and returns the set of valid tokens starting in state s, by
+// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
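+
+// exampleNextTokens is an illustrative sketch, not part of the upstream
+// runtime: it shows how the lookahead helpers above combine. With a nil
+// context, NextTokens computes the in-rule token set once and caches it on
+// the state, so a second call returns the cached, read-only set.
+func exampleNextTokens(a *ATN, s ATNState) *IntervalSet {
+	first := a.NextTokens(s, nil)  // computed and cached on s
+	second := a.NextTokens(s, nil) // served from the cache
+	_ = second
+	return first
+}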
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
new file mode 100644
index 000000000..a83f25d34
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
@@ -0,0 +1,335 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+const (
+ lexerConfig = iota // Indicates that this ATNConfig is for a lexer
+ parserConfig // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context *PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+ cType int // lexerConfig or parserConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ pac := &ATNConfig{}
+ pac.state = state
+ pac.alt = alt
+ pac.context = context
+ pac.semanticContext = semanticContext
+ pac.cType = parserConfig
+ return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
+// are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+ }
+ b := &ATNConfig{}
+ b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+ b.cType = parserConfig
+ return b
+}
+
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+
+ a.state = state
+ a.alt = alt
+ a.context = context
+ a.semanticContext = semanticContext
+ a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+ a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+ return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+ return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+ return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+ a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+ return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+ return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+ return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+ a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+ switch a.cType {
+ case lexerConfig:
+ return a.LEquals(o)
+ case parserConfig:
+ return a.PEquals(o)
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+ var other, ok = o.(*ATNConfig)
+
+ if !ok {
+ return false
+ }
+ if a == other {
+ return true
+ } else if other == nil {
+ return false
+ }
+
+ var equal bool
+
+ if a.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = a.context.Equals(other.context)
+ }
+
+ var (
+ nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = a.alt == other.alt
+ cons = a.semanticContext.Equals(other.semanticContext)
+ sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) Hash() int {
+ switch a.cType {
+ case lexerConfig:
+ return a.LHash()
+ case parserConfig:
+ return a.PHash()
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+ var c int
+ if a.context != nil {
+ c = a.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if a.context != nil {
+ s1 = ",[" + fmt.Sprint(a.context) + "]"
+ }
+
+ if a.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(a.semanticContext)
+ }
+
+ if a.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+ var f int
+ if a.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, a.context.Hash())
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+ var otherT, ok = other.(*ATNConfig)
+ if !ok {
+ return false
+ } else if a == otherT {
+ return true
+ } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ switch {
+ case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+ return true
+ case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+ if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+ return false
+ }
+ default:
+		return false // One, but not both, is nil
+ }
+
+ return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
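+
+// exampleConfigEquality is an illustrative sketch, not part of the upstream
+// runtime: parser configurations built from the same (state, alt, context)
+// tuple compare equal under PEquals and hash identically under PHash. The
+// ATNState and context are assumed to be supplied by the caller.
+func exampleConfigEquality(s ATNState, ctx *PredictionContext) bool {
+	c1 := NewATNConfig6(s, 1, ctx)                      // defaults to SemanticContextNone
+	c2 := NewATNConfig5(s, 1, ctx, SemanticContextNone) // equivalent explicit form
+	return c1.PEquals(c2) && c1.PHash() == c2.PHash()
+}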
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 000000000..52dbaf806
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+ cachedHash int
+
+	// configLookup is used to determine whether two ATNConfigSets are equal. We
+	// need all configurations with the same (s, i, _, semctx) to be equal. A
+	// lookup entry effectively doubles the number of objects associated with
+	// ATNConfigs. Entries are hashed by (s, i, _, pi), not including the context.
+	// The lookup is wiped out when the set becomes read-only, because the set
+	// then becomes a DFA state.
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+
+	// configs holds the added elements that did not match an existing key in
+	// configLookup, in insertion order.
+ configs []*ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the ATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+	// readOnly is whether it is read-only. If true, no code may manipulate the
+	// set, because DFA states will point at sets and those must not change. This
+	// does not protect the other fields; in particular, conflictingAlts is
+	// assigned after readOnly.
+ readOnly bool
+
+	// uniqueAlt caches the alternative predicted by all configurations in the
+	// set, or ATNInvalidAltNumber when no single alternative is predicted.
+	uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator and Hash() provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+ predicates := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ predicates = append(predicates, c)
+ }
+ }
+
+ return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+	// An empty lookup indicates no optimization is possible
+ if b.configLookup == nil || b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare reports whether the configs are equal: they must be in the same order
+// and each pair's Equals function must return true. Java uses ArrayList.equals(),
+// which requires the same order.
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+ for i := 0; i < len(b.configs); i++ {
+ if !b.configs[i].Equals(bs.configs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*ATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*ATNConfigSet)
+ var eca bool
+ switch {
+ case b.conflictingAlts == nil && other2.conflictingAlts == nil:
+ eca = true
+ case b.conflictingAlts != nil && other2.conflictingAlts != nil:
+ eca = b.conflictingAlts.equals(other2.conflictingAlts)
+ }
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ eca &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *ATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *ATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
+ if b.readOnly {
+ panic("not implemented for read-only sets")
+ }
+ if b.configLookup == nil {
+ return false
+ }
+ return b.configLookup.Contains(item)
+}
+
+func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
+ return b.Contains(item)
+}
+
+func (b *ATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+ b.configs = make([]*ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
+}
+
+func (b *ATNConfigSet) String() string {
+
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
+// for use in lexers.
+func NewOrderedATNConfigSet() *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
+ fullCtx: false,
+ }
+}
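+
+// exampleConfigSetMerge is an illustrative sketch, not part of the upstream
+// runtime, of the Add contract documented above: a second config with the
+// same (state, alt, semantic context) key is merged into the existing entry
+// rather than appended. ctx is assumed to be a valid, non-nil prediction
+// context supplied by the caller.
+func exampleConfigSetMerge(s ATNState, ctx *PredictionContext) int {
+	set := NewATNConfigSet(false) // SLL-style set: $ is treated as a wildcard
+	set.Add(NewATNConfig6(s, 1, ctx), nil)
+	set.Add(NewATNConfig6(s, 1, ctx), nil) // merged with the first entry
+	return len(set.configs)                // 1
+}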
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
new file mode 100644
index 000000000..bdb30b362
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "errors"
+
+var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func (opts *ATNDeserializationOptions) ReadOnly() bool {
+ return opts.readOnly
+}
+
+func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.readOnly = readOnly
+}
+
+func (opts *ATNDeserializationOptions) VerifyATN() bool {
+ return opts.verifyATN
+}
+
+func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.verifyATN = verifyATN
+}
+
+func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
+ return opts.generateRuleBypassTransitions
+}
+
+func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.generateRuleBypassTransitions = generateRuleBypassTransitions
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
+ return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
+}
+
+func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+ if other != nil {
+ *o = *other
+ o.readOnly = false
+ }
+ return o
+}
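+
+// exampleBypassOptions is an illustrative sketch, not part of the upstream
+// runtime: copy the default options, enable rule bypass transitions, then
+// freeze the copy so any later mutation panics.
+func exampleBypassOptions() *ATNDeserializationOptions {
+	opts := DefaultATNDeserializationOptions() // mutable copy of the defaults
+	opts.SetGenerateRuleBypassTransitions(true)
+	opts.SetReadOnly(true) // subsequent Set* calls now panic
+	return opts
+}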
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
new file mode 100644
index 000000000..2dcb9ae11
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const serializedVersion = 4
+
+type loopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type blockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ options *ATNDeserializationOptions
+ data []int32
+ pos int
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = &defaultATNDeserializationOptions
+ }
+
+ return &ATNDeserializer{options: options}
+}
+
+//goland:noinspection GoUnusedFunction
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
+ a.data = data
+ a.pos = 0
+ a.checkVersion()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := a.readSets(atn, nil)
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != serializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ nstates := a.readInt()
+
+ // Allocate worst case size.
+ loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
+ endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
+
+ // Preallocate states slice.
+ atn.states = make([]ATNState, 0, nstates)
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for _, pair := range loopBackStateNumbers {
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for _, pair := range endStateNumbers {
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules)
+
+ for i := range atn.ruleToStartState {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules)
+
+ for _, state := range atn.states {
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+ atn.modeToStartState = make([]*TokensStartState, nmodes)
+
+ for i := range atn.modeToStartState {
+ s := a.readInt()
+
+ atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
+ }
+}
+
+func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+ m := a.readInt()
+
+ // Preallocate the needed capacity.
+ if cap(sets)-len(sets) < m {
+ isets := make([]*IntervalSet, len(sets), len(sets)+m)
+ copy(isets, sets)
+ sets = isets
+ }
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := a.readInt()
+ i2 := a.readInt()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for _, state := range atn.states {
+ for _, t := range state.GetTransitions() {
+ var rt, ok = t.(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if rt.precedence == 0 {
+ outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for _, state := range atn.states {
+ if s2, ok := state.(BlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.getEndState() == nil {
+ panic("IllegalState")
+ }
+
+ // Block end states can only be associated to a single block start state
+ if s2.getEndState().startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.getEndState().startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count)
+
+ for i := range atn.lexerActions {
+ actionType := a.readInt()
+ data1 := a.readInt()
+ data2 := a.readInt()
+ atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(&bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+	for count > 0 {
+		// Move the last transition to the bypass start state and remove it from
+		// the rule start state, mirroring the Java runtime, which repeatedly
+		// removes the final transition until none remain.
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count--
+	}
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the [ATN] to determine if an ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.options.VerifyATN() {
+ return
+ }
+
+ // Verify assumptions
+ for _, state := range atn.states {
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2.transitions[0].getTarget().(type) {
+ case *StarBlockStartState:
+ _, ok := s2.transitions[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(s2.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case BlockStartState:
+ a.checkCondition(s2.getEndState() != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v) // data is 32 bits but int is at least that big
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
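+
+// exampleDeserialize is an illustrative sketch, not part of the upstream
+// runtime, of the deserializer entry point: code generated by the ANTLR tool
+// embeds the ATN as an []int32 and rebuilds it once with Deserialize.
+func exampleDeserialize(serializedATN []int32) *ATN {
+	de := NewATNDeserializer(nil) // nil selects the default options
+	return de.Deserialize(serializedATN)
+}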
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
new file mode 100644
index 000000000..afe6c9f80
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
+ visited := NewVisitRecord()
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
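+
+// exampleCachedContext is an illustrative sketch, not part of the upstream
+// runtime: when no shared context cache is configured, getCachedContext is a
+// no-op and returns the context it was given.
+func exampleCachedContext(ctx *PredictionContext) *PredictionContext {
+	sim := &BaseATNSimulator{} // sharedContextCache is nil here
+	return sim.getCachedContext(ctx)
+}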
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
new file mode 100644
index 000000000..2ae5807cd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+ as.epsilonOnlyTransitions = false
+ }
+
+ // TODO: Check code for already present compared to the Java equivalent
+ //alreadyPresent := false
+ //for _, t := range as.transitions {
+ // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+ // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+ // alreadyPresent = true
+ // break
+ // }
+ // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
+ // alreadyPresent = true
+ // break
+ // }
+ //}
+ //if !alreadyPresent {
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+ //} else {
+ // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+ //}
+}
+
+type BasicState struct {
+ BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ return &BasicState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ }
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ }
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ },
+ }
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ return &BasicBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ return &BlockEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockEnd,
+ },
+ startState: nil,
+ }
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ return &RuleStopState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStop,
+ },
+ }
+}
+
+type RuleStartState struct {
+ BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ return &RuleStartState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStart,
+ },
+ }
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ return &PlusLoopbackState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusLoopBack,
+ },
+ },
+ }
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ return &PlusBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ return &StarBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ return &StarLoopbackState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopBack,
+ },
+ }
+}
+
+type StarLoopEntryState struct {
+ BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+	// precedenceRuleDecision defaults to false and indicates whether this state
+	// can benefit from a precedence DFA during SLL decision making.
+ return &StarLoopEntryState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopEntry,
+ },
+ },
+ }
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ return &LoopEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateLoopEnd,
+ },
+ }
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ return &TokensStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateTokenStart,
+ },
+ },
+ }
+}
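+
+// exampleStateWiring is an illustrative sketch, not part of the upstream
+// runtime: the ATN assigns state numbers as states are added, and adding an
+// epsilon transition first keeps epsilonOnlyTransitions true on the source.
+func exampleStateWiring(a *ATN) *BasicState {
+	src := NewBasicState()
+	dst := NewBasicState()
+	a.addState(src) // receives the next free state number
+	a.addState(dst)
+	src.AddTransition(NewEpsilonTransition(dst, -1), -1) // index -1 appends
+	return src
+}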
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
new file mode 100644
index 000000000..3a515a145
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represent the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
new file mode 100644
index 000000000..bd8127b6b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(Interval) string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
new file mode 100644
index 000000000..1bb0314ea
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics with an
+ // UnsupportedOperationException). Explicitly setting the token text allows
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
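+
+// As a usage sketch, a factory that eagerly copies token text can be built with
+// NewCommonTokenFactory(true); how it is installed depends on the lexer
+// implementation, so the wiring below is only indicative:
+//
+//	factory := antlr.NewCommonTokenFactory(true)
+//	_ = factory // install on a lexer that exposes a token-factory setter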
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
new file mode 100644
index 000000000..b75da9df0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index into [tokens] of the current token (next token to consume).
+ // tokens[p] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of [IntStream] for a description of initializing methods.
+ index int
+
+ // tokenSource is the [TokenSource] from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens contains all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens and will pull tokens from the given lexer channel.
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
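+
+// A minimal usage sketch follows. The generated lexer constructor is
+// hypothetical; the other names are from this runtime:
+//
+//	input := antlr.NewInputStream("a + b")
+//	lexer := NewMyLexer(input) // hypothetical generated lexer
+//	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+//	stream.Fill()
+//	for _, tok := range stream.GetAllTokens() {
+//		fmt.Println(tok.GetText())
+//	}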
+
+// GetAllTokens returns all tokens currently pulled from the token source.
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(_ int) {}
+
+func (c *CommonTokenStream) Reset() {
+ c.fetchedEOF = false
+ c.tokens = make([]Token, 0)
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+ // The last token in tokens is EOF. Skip the check if p indexes any fetched
+ // token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if p indexes a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i, and false otherwise.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds n elements to the buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens gets all tokens from start (inclusive) up to, but not including, stop,
+// optionally filtered to the token types contained in types.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ for i := start; i < stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the c token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+ c.fetchedEOF = false
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between 'i' and [TokenEOF].
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
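+
+// As a usage sketch, tokens that the lexer routed to the hidden channel (such
+// as whitespace or comments) can be recovered around a given token; tok is
+// illustrative:
+//
+//	leading := stream.GetHiddenTokensToLeft(tok.GetTokenIndex(), antlr.TokenHiddenChannel)
+//	trailing := stream.GetHiddenTokensToRight(tok.GetTokenIndex(), antlr.TokenHiddenChannel)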
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ c.Fill()
+ return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+ c.lazyInit()
+ c.Sync(interval.Stop)
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[i] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
+
+// getNumberOfOnChannelTokens returns the number of on-channel tokens, counting EOF at most once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
new file mode 100644
index 000000000..7467e9b43
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
@@ -0,0 +1,150 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// This file contains all the implementations of custom comparators used for generic collections when the
+// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
+// put the comparators in the source files for the structs themselves, but given that the organization of this code is
+// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
+// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[*ATNConfig]{}
+
+ // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+ aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ dfaStateEqInst = &ObjEqComparator[*DFAState]{}
+ semctxEqInst = &ObjEqComparator[SemanticContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+ pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+)
+
+// Equals2 delegates to the Equals() method of type T
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+ return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+ return o.Hash()
+}
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
+type ATNAltConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetContext().Equals(o2.GetContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, o.GetState().GetStateNumber())
+ h = murmurUpdate(h, o.GetContext().Hash())
+ return murmurFinish(h, 2)
+}
+
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type BaseATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either are nil, but not both, then the result is false
+ //
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup, but in fact just
+// delegates to the standard Hash() method of the ATNConfig type.
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+ return o.Hash()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 000000000..c2b724514
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+ statsTraceStacks bool
+ lexerATNSimulatorDebug bool
+ lexerATNSimulatorDFADebug bool
+ parserATNSimulatorDebug bool
+ parserATNSimulatorTraceATNSim bool
+ parserATNSimulatorDFADebug bool
+ parserATNSimulatorRetryDebug bool
+ lRLoopEntryBranchOpt bool
+ memoryManager bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+ lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
+// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation type used by the runtime such as sync.Pool or not.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on the collection create point stack flag to true, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+ for _, option := range options {
+ err := option(&runtimeConfig)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func WithStatsTraceStacks(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.statsTraceStacks = trace
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
+func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
+func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
+func WithParserATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
+// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
+func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorTraceATNSim = trace
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorRetryDebug = debug
+ return nil
+ }
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized. This is useful for debugging parser issues by comparing the output with the Java runtime. Setting
+// it to false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(on bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lRLoopEntryBranchOpt = on
+ return nil
+ }
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.memoryManager = use
+ return nil
+ }
+}
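+
+// As a usage sketch, several options can be combined in a single call; they are
+// applied in the order given, so later options win on conflict:
+//
+//	_ = antlr.ConfigureRuntime(
+//		antlr.WithLexerATNSimulatorDebug(true),
+//		antlr.WithParserATNSimulatorTraceATNSim(true),
+//	)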
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
new file mode 100644
index 000000000..6b63eb158
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there. Go maps implement key hash collisions and so on and are very
+ // good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
+ // and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
+ // to see if they really are the same object. Hence, we have our own map storage.
+ //
+ states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
+
+ numstates int
+
+ s0 *DFAState
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: nil, // Lazy initialize
+ }
+ if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
+ dfa.precedenceDfa = true
+ dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0.isAcceptState = false
+ dfa.s0.requiresFullContext = false
+ }
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
+
+func (d *DFA) getPrecedenceDfa() bool {
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.states = nil // Lazy initialize
+ d.numstates = 0
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JStore.
+func (d *DFA) Len() int {
+ if d.states == nil {
+ return 0
+ }
+ return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JStore.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ return nil, false
+ }
+ return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+ }
+ return d.states.Put(s)
+}
+
+func (d *DFA) getS0() *DFAState {
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0 = s
+}
+
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+func (d *DFA) sortedStates() []*DFAState {
+ if d.states == nil {
+ return []*DFAState{}
+ }
+ vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+ return i.stateNumber < j.stateNumber
+ })
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
new file mode 100644
index 000000000..0e1100989
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
new file mode 100644
index 000000000..654143074
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1, a2,..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs *ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so that (-1),
+ // which is Token.EOF, maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates that this state was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump to doing
+ // full-context prediction if this field is true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full-context prediction evaluates predicates on-the-fly. If
+ // predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() []int {
+ var alts []int
+
+ if d.configs != nil {
+ for _, c := range d.configs.configs {
+ alts = append(alts, c.GetAlt())
+ }
+ }
+
+ if len(alts) == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) Hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.Hash())
+ return murmurFinish(h, 1)
+}
+
+// Equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
+ if d == o {
+ return true
+ }
+
+ return d.configs.Equals(o.(*DFAState).configs)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
new file mode 100644
index 000000000..bd2cd8bc3
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// DiagnosticErrorListener is an implementation of [ErrorListener] that can be used to identify
+// certain potential correctness and performance problems in grammars. "Reports"
+// are made by calling [Parser.NotifyErrorListeners] with the appropriate
+// message:
+//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which equaled the
+//     minimum alternative of the SLL conflict.
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique alternative,
+//     and the minimum alternative of the SLL conflict was found to not be
+//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+// getConflictingAlts computes the set of conflicting or ambiguous alternatives from a
+// configuration set, if that information was not already provided by the
+// parser.
+//
+// ReportedAlts is the set of conflicting or ambiguous alternatives as
+// Reported by the parser, and set is the conflicting or ambiguous
+// configuration set. Returns ReportedAlts if it is not nil, otherwise
+// returns the set of alternatives represented in set.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.configs {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
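+
+// As a usage sketch, the listener is attached to a parser like any other error
+// listener (p stands for a generated parser and is illustrative):
+//
+//	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // report exact ambiguities only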
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
new file mode 100644
index 000000000..21a021643
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// ErrorListener defines the callbacks a recognizer uses to report syntax
+// errors and ambiguity information. [DefaultErrorListener] provides an empty
+// default implementation of this interface; each of its methods does nothing,
+// but can be overridden as necessary.
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of [ConsoleErrorListener].
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the
+// values of line, column, and msg using
+// the following format:
+//
+//	line <line>:<column> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+ _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
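+
+// A common pattern is to embed DefaultErrorListener and override only
+// SyntaxError. The sketch below collects messages instead of printing them;
+// the type name is illustrative:
+//
+//	type collectingListener struct {
+//		*antlr.DefaultErrorListener
+//		errs []string
+//	}
+//
+//	func (c *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
+//		c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
+//	}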
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 000000000..9db2be1c7
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
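+
+// A different strategy can be installed on a parser. For example, assuming the
+// [BailErrorStrategy] defined later in this file and a parser p that exposes
+// SetErrorHandler (as BaseParser does), a bail-out strategy that aborts on the
+// first error would be installed with:
+//
+//	p.SetErrorHandler(antlr.NewBailErrorStrategy())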
+
+// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // @see //InErrorRecoveryMode
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// reset resets the error handler state. The default implementation simply calls
+// [DefaultErrorStrategy.endErrorCondition] to ensure that the handler is not in
+// error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError is the default implementation of error reporting.
+// It returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls [beginErrorCondition]
+// and dispatches the Reporting task based on the runtime type of e
+// according to the following table.
+//
+// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
+// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
+// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
+// All other types : Calls [NotifyErrorListeners] to Report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.GetErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional
+//
+//	(...)? or (...)*
+//
+// or a block with an empty alternative, then the expected set includes what follows
+// the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rule surrounding the loop. So, for rule:
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality costs a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+// { }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+	// try the cheaper subset first; we might get lucky. It seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
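+
+// As a sketch of the empty override mentioned above (the embedding type is
+// an illustrative assumption, not part of this runtime), a strategy that
+// disables Sync entirely might look like:
+//
+//	type noSyncStrategy struct {
+//		*DefaultErrorStrategy
+//	}
+//
+//	func (s *noSyncStrategy) Sync(_ Parser) {}
+//
+//	parser.SetErrorHandler(&noSyncStrategy{NewDefaultErrorStrategy()})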
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+			input = "<EOF>"
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+		input = "<unknown input>"
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method sets an [InputMisMatchException] on the parser.
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenDeletion].
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token - at LA(1) - is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// called the chain:
+//
+// stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+// : ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+	// even that didn't work; report the mismatched input
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ return nil
+}
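+
+// As a rough sketch of how a Match failure reaches RecoverInline (simplified
+// from the parser implementation; handler and expectedType are illustrative):
+//
+//	t := parser.GetCurrentToken()
+//	if t.GetTokenType() == expectedType {
+//		handler.ReportMatch(parser)
+//		parser.Consume()
+//	} else {
+//		t = handler.RecoverInline(parser)
+//	}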
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns true, recognizer will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// true, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+	// if the current token is consistent with what could come after the current
+	// ATN state, then we know we're missing a token; error recovery
+	// is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+// x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO: matches the JS impl
+		}
+	}
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
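+
+// A minimal sketch of such an override (the embedding type and conjured text
+// are illustrative assumptions, not part of this runtime):
+//
+//	type myErrorStrategy struct {
+//		*DefaultErrorStrategy
+//	}
+//
+//	func (m *myErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+//		tok := m.DefaultErrorStrategy.GetMissingSymbol(recognizer)
+//		tok.SetText("<conjured " + tok.GetText() + ">")
+//		return tok
+//	}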
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
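+
+// For example, a development-time override might look like this sketch (the
+// embedding type is an illustrative assumption):
+//
+//	type verboseErrorStrategy struct {
+//		*DefaultErrorStrategy
+//	}
+//
+//	func (v *verboseErrorStrategy) GetTokenErrorDisplay(t Token) string {
+//		if t == nil {
+//			return "<no token>"
+//		}
+//		return t.String() // dumps everything about the token
+//	}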
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+// ;
+//
+// b : c '^' INT
+// ;
+//
+// c : ID
+// | INT
+// ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input “[]”, the call chain is
+//
+// a → b → c
+//
+// and, hence, the follow context stack is:
+//
+//	Depth	Follow set	Start of rule execution
+//	0	<EOF>	a (from main())
+//	1	']'	b
+//	2	'^'	c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets - the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c:
+//
+// {']','^'}
+//
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
+// [A note on error recovery in recursive descent parsers].
+//
+// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers]
+//
+// Like Grosch, I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
+// during parsing. Later, the runtime Sync was improved for loops/sub-rules; see the [Sync] docs.
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// [ParserRuleContext].exception field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of [BailErrorStrategy.Sync] improves the performance of
+// the first stage.
+//
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the [BailErrorStrategy] avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Recover, instead of recovering from exception e, wraps it in a
+// [ParseCancellationException] so it is not caught by the
+// rule function catch blocks. Use Exception.GetCause() to get the
+// original [RecognitionException].
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+}
+
+// RecoverInline makes sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Sync makes sure we don't attempt to recover from problems in sub-rules.
+func (b *BailErrorStrategy) Sync(_ Parser) {
+}
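+
+// A sketch of the two-stage parsing pattern described above; the generated
+// StartRule method, the HasError check, and the rewind call are illustrative
+// assumptions, not a prescribed API:
+//
+//	parser.SetErrorHandler(NewBailErrorStrategy())
+//	tree := parser.StartRule()
+//	if parser.HasError() {
+//		// First stage bailed out; rewind and retry with full error recovery.
+//		tokenStream.Seek(0)
+//		parser.SetErrorHandler(NewDefaultErrorStrategy())
+//		tree = parser.StartRule()
+//	}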
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go
new file mode 100644
index 000000000..8f0f2f601
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go
@@ -0,0 +1,259 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+
+	// The current Token when an error occurred. Since not all streams
+	// support accessing symbols by index, we have to track the [Token]
+	// instance itself.
+ //
+ t.offendingToken = nil
+
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
+ // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
+ //
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)
+ n.deadEndConfigs = deadEndConfigs
+
+	// The token object at the start index; the input stream might
+	// not be buffering tokens so get a reference to it.
+ //
+ // At the time the error occurred, of course the stream needs to keep a
+ // buffer of all the tokens, but later we might not have access to those.
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative, just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func (p ParseCancellationException) GetOffendingToken() Token {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetMessage() string {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetInputStream() IntStream {
+ //TODO implement me
+ panic("implement me")
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
new file mode 100644
index 000000000..5f65f809b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ InputStream
+ filename string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+	// The file is fully read below, so a failure on Close is not actionable
+	// here and is deliberately ignored.
+	defer func(f *os.File) {
+		_ = f.Close()
+	}(f)
+
+ reader := bufio.NewReader(f)
+ fInfo, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &FileStream{
+ InputStream: InputStream{
+ index: 0,
+ name: fileName,
+ },
+ filename: fileName,
+ }
+
+ // Pre-build the buffer and read runes efficiently
+ //
+ fs.data = make([]rune, 0, fInfo.Size())
+ for {
+ r, _, err := reader.ReadRune()
+ if err != nil {
+ break
+ }
+ fs.data = append(fs.data, r)
+ }
+ fs.size = len(fs.data) // Size in runes
+
+ // All done.
+ //
+ return fs, nil
+}
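+
+// A short usage sketch (the file name and generated lexer constructor are
+// illustrative assumptions):
+//
+//	fs, err := NewFileStream("input.txt")
+//	if err != nil {
+//		// handle the error
+//	}
+//	lexer := NewMyLexer(fs)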
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
new file mode 100644
index 000000000..b737fe85f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "io"
+)
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+// NewIoStream creates a new input stream from the given io.Reader reader.
+// Note that the reader is read completely into memory and so it must actually
+// have a stopping point - you cannot pass in a reader on an open-ended source such
+// as a socket for instance.
+func NewIoStream(reader io.Reader) *InputStream {
+
+ rReader := bufio.NewReader(reader)
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ }
+
+ // Pre-build the buffer and read runes reasonably efficiently given that
+ // we don't exactly know how big the input is.
+ //
+ is.data = make([]rune, 0, 512)
+ for {
+ r, _, err := rReader.ReadRune()
+ if err != nil {
+ break
+ }
+ is.data = append(is.data, r)
+ }
+ is.size = len(is.data) // number of runes
+ return is
+}
+
+// NewInputStream creates a new input stream from the given string
+func NewInputStream(data string) *InputStream {
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ data: []rune(data), // This is actually the most efficient way
+ }
+ is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
+ return is
+}
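+
+// A short usage sketch of the lookahead convention: LA(1) is the current
+// (next unconsumed) character and Consume advances past it.
+//
+//	is := NewInputStream("ab")
+//	_ = is.LA(1)  // 'a'
+//	is.Consume()
+//	_ = is.LA(1)  // 'b'
+//	_ = is.LA(-1) // 'a' (one character back)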
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+// LA returns the character at the given offset relative to the current position in the input stream
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+// LT returns the character at the given offset relative to the current position; it is a synonym for LA
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+// Size returns the total number of characters in the input stream
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// Mark does nothing here as we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+// Release does nothing here as we have the entire buffer
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek sets the input position to the provided index offset
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return ""
+}
+
+// String returns the entire input stream as a string
+func (is *InputStream) String() string {
+ return string(is.data)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
new file mode 100644
index 000000000..4778878bd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
new file mode 100644
index 000000000..cc5066067
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+// NewInterval creates a new interval with the given start and stop values.
+func NewInterval(start, stop int) Interval {
+ return Interval{
+ Start: start,
+ Stop: stop,
+ }
+}
+
+// Contains returns true if the given item is contained within the interval.
+func (i Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
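+
+// Note that intervals are half-open: Start is inclusive and Stop is
+// exclusive. For example:
+//
+//	NewInterval(3, 5).Contains(4) // true
+//	NewInterval(3, 5).Contains(5) // false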
+
+// String generates a string representation of the interval.
+func (i Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+// Length returns the length of the interval.
+func (i Interval) Length() int {
+ return i.Stop - i.Start
+}
+
+// IntervalSet represents a collection of [Intervals], which may be read-only.
+type IntervalSet struct {
+ intervals []Interval
+ readOnly bool
+}
+
+// NewIntervalSet creates a new empty, writable, interval set.
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) Equals(other *IntervalSet) bool {
+ if len(i.intervals) != len(other.intervals) {
+ return false
+ }
+
+ for k, v := range i.intervals {
+ if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+		// greater than any existing interval, so append
+ i.intervals = append(i.intervals, v)
+ }
+}
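+
+// For example, adjacent and overlapping additions are merged into a single
+// interval:
+//
+//	s := NewIntervalSet()
+//	s.addOne(5)      // {5}
+//	s.addRange(3, 4) // merges with 5 to give 3..5
+//	_ = s.String()   // "3..5"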
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ iLen := 0
+
+ for _, v := range i.intervals {
+ iLen += v.Length()
+ }
+
+ return iLen
+}
+
+func (i *IntervalSet) removeRange(v Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+			// intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+			} else if v < ki.Stop-1 {
+				x := NewInterval(ki.Start, v)
+				// Interval is a value type, so shrink the stored element, not the local copy in ki
+				i.intervals[k].Start = v + 1
+				// i.intervals.splice(k, 0, x)
+				i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []Interval {
+ return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+	names := make([]string, 0, len(i.intervals))
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+	if a == TokenEOF {
+		return "<EOF>"
+	} else if a == TokenEpsilon {
+		return "<EPSILON>"
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 000000000..ceccd96d2
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "container/list"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+type CollectionSource int
+type CollectionDescriptor struct {
+ SybolicName string
+ Description string
+}
+
+const (
+ UnknownCollection CollectionSource = iota
+ ATNConfigLookupCollection
+ ATNStateCollection
+ DFAStateCollection
+ ATNConfigCollection
+ PredictionContextCollection
+ SemanticContextCollection
+ ClosureBusyCollection
+ PredictionVisitedCollection
+ MergeCacheCollection
+ PredictionContextCacheCollection
+ AltSetCollection
+ ReachSetCollection
+)
+
+var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
+ UnknownCollection: {
+ SybolicName: "UnknownCollection",
+ Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
+ },
+ ATNConfigCollection: {
+ SybolicName: "ATNConfigCollection",
+ Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "For instance, it is used to store the results of the closure() operation in the ATN.",
+ },
+ ATNConfigLookupCollection: {
+ SybolicName: "ATNConfigLookupCollection",
+ Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
+ },
+ ATNStateCollection: {
+ SybolicName: "ATNStateCollection",
+ Description: "ATNState collection. This is used to store the states of the ATN.",
+ },
+ DFAStateCollection: {
+ SybolicName: "DFAStateCollection",
+ Description: "DFAState collection. This is used to store the states of the DFA.",
+ },
+ PredictionContextCollection: {
+ SybolicName: "PredictionContextCollection",
+ Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
+ },
+ SemanticContextCollection: {
+ SybolicName: "SemanticContextCollection",
+ Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
+ },
+ ClosureBusyCollection: {
+ SybolicName: "ClosureBusyCollection",
+ Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
+ "It stores ATNConfigs that are currently being processed in the closure() operation.",
+ },
+ PredictionVisitedCollection: {
+ SybolicName: "PredictionVisitedCollection",
+ Description: "A map that records whether we have visited a particular context when searching through cached entries.",
+ },
+ MergeCacheCollection: {
+ SybolicName: "MergeCacheCollection",
+ Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
+ },
+ PredictionContextCacheCollection: {
+ SybolicName: "PredictionContextCacheCollection",
+ Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
+ },
+ AltSetCollection: {
+ SybolicName: "AltSetCollection",
+ Description: "Used to eliminate duplicate alternatives in an ATN config set.",
+ },
+ ReachSetCollection: {
+ SybolicName: "ReachSetCollection",
+ Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
+ },
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+ stats *JStatRec
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ s.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ s.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(s.stats)
+ }
+ return s
+}
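+
+// A minimal usage sketch; the key type and comparator are illustrative
+// assumptions, not part of this runtime:
+//
+//	type myKey struct{ id int }
+//
+//	type myKeyEq struct{}
+//
+//	func (myKeyEq) Hash1(k *myKey) int       { return k.id }
+//	func (myKeyEq) Equals2(a, b *myKey) bool { return a.id == b.id }
+//
+//	s := NewJStore[*myKey, myKeyEq](myKeyEq{}, UnknownCollection, "example")
+//	v, exists := s.Put(&myKey{id: 1}) // exists is false; v is the stored value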
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
+
+ if collectStats {
+ s.stats.Puts++
+ }
+ kh := s.comparator.Hash1(value)
+
+ var hClash bool
+ for _, v1 := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(value, v1) {
+ if collectStats {
+ s.stats.PutHits++
+ s.stats.PutHashConflicts++
+ }
+ return v1, true
+ }
+ if collectStats {
+ s.stats.PutMisses++
+ }
+ }
+ if collectStats && hClash {
+ s.stats.PutHashConflicts++
+ }
+ s.store[kh] = append(s.store[kh], value)
+
+ if collectStats {
+ if len(s.store[kh]) > s.stats.MaxSlotSize {
+ s.stats.MaxSlotSize = len(s.store[kh])
+ }
+ }
+ s.len++
+ if collectStats {
+ s.stats.CurSize = s.len
+ if s.len > s.stats.MaxSize {
+ s.stats.MaxSize = s.len
+ }
+ }
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) {
+ if collectStats {
+ s.stats.Gets++
+ }
+ kh := s.comparator.Hash1(key)
+ var hClash bool
+ for _, v := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(key, v) {
+ if collectStats {
+ s.stats.GetHits++
+ s.stats.GetHashConflicts++
+ }
+ return v, true
+ }
+ if collectStats {
+ s.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ s.stats.GetHashConflicts++
+ }
+ s.stats.GetNoEnt++
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool {
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ vs = append(vs, e...)
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+ stats *JStatRec
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
+ m := &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
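+
+// A minimal usage sketch, reusing the illustrative myKey and myKeyEq types
+// from the JStore example above:
+//
+//	m := NewJMap[*myKey, string, myKeyEq](myKeyEq{}, UnknownCollection, "example")
+//	m.Put(&myKey{id: 1}, "one")
+//	v, ok := m.Get(&myKey{id: 1}) // "one", true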
+
+func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
+ if collectStats {
+ m.stats.Puts++
+ }
+ kh := m.comparator.Hash1(key)
+
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.PutHits++
+ m.stats.PutHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.PutHashConflicts++
+ }
+ }
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ if collectStats {
+ if len(m.store[kh]) > m.stats.MaxSlotSize {
+ m.stats.MaxSlotSize = len(m.store[kh])
+ }
+ }
+ m.len++
+ if collectStats {
+ m.stats.CurSize = m.len
+ if m.len > m.stats.MaxSize {
+ m.stats.MaxSize = m.len
+ }
+ }
+ return val, false
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+ if collectStats {
+ m.stats.Gets++
+ }
+ var none V
+ kh := m.comparator.Hash1(key)
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.GetHits++
+ m.stats.GetHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.GetHashConflicts++
+ }
+ m.stats.GetNoEnt++
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
+
+type JPCMap struct {
+ store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
+ size int
+ stats *JStatRec
+}
+
+func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
+ m := &JPCMap{
+ store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+ // Do we have a map stored by k1?
+ //
+ m2, present := pcm.store.Get(k1)
+ if present {
+ if collectStats {
+ pcm.stats.GetHits++
+ }
+ // We found a map of values corresponding to k1, so now we need to look up k2 in that map
+ //
+ return m2.Get(k2)
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
+
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ // First does a map already exist for k1?
+ //
+ if m2, present := pcm.store.Get(k1); present {
+ if collectStats {
+ pcm.stats.PutHits++
+ }
+ _, present = m2.Put(k2, v)
+ if !present {
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ }
+ } else {
+		// No map found for k1, so we create it, add in our value, then store it
+ //
+ if collectStats {
+ pcm.stats.PutMisses++
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
+ } else {
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
+ }
+
+ m2.Put(k2, v)
+ pcm.store.Put(k1, m2)
+ pcm.size++
+ }
+}
+
+type JPCMap2 struct {
+ store map[int][]JPCEntry
+ size int
+ stats *JStatRec
+}
+
+type JPCEntry struct {
+ k1, k2, v *PredictionContext
+}
+
+func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
+ m := &JPCMap2{
+ store: make(map[int][]JPCEntry, 1000),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
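+// dHash pairs the two cached context hashes with the conventional *31
+// mixing step; collisions are resolved by the Equals checks in Get and Put.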
+func dHash(k1, k2 *PredictionContext) int {
+ return k1.cachedHash*31 + k2.cachedHash
+}
+
+func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.GetHits++
+ pcm.stats.GetHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.GetHashConflicts++
+ }
+ pcm.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.PutHits++
+ pcm.stats.PutHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.PutHashConflicts++
+ }
+ }
+ pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ return nil, false
+}
+
+type VisitEntry struct {
+ k *PredictionContext
+ v *PredictionContext
+}
+type VisitRecord struct {
+ store map[*PredictionContext]*PredictionContext
+ len int
+ stats *JStatRec
+}
+
+type VisitList struct {
+ cache *list.List
+ lock sync.RWMutex
+}
+
+var visitListPool = VisitList{
+ cache: list.New(),
+ lock: sync.RWMutex{},
+}
+
+// NewVisitRecord returns a new VisitRecord instance from the pool if available.
+// Note that this "map" uses a pointer as a key because we are emulating the behavior of
+// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
+// which means the test is whether the key is the very same object reference,
+// rather than whether it is .equals() to another object.
+func NewVisitRecord() *VisitRecord {
+ visitListPool.lock.Lock()
+ el := visitListPool.cache.Front()
+ defer visitListPool.lock.Unlock()
+ var vr *VisitRecord
+ if el == nil {
+ vr = &VisitRecord{
+ store: make(map[*PredictionContext]*PredictionContext),
+ }
+ if collectStats {
+ vr.stats = &JStatRec{
+ Source: PredictionContextCacheCollection,
+ Description: "VisitRecord",
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ vr.stats.CreateStack = debug.Stack()
+ }
+ }
+ } else {
+ vr = el.Value.(*VisitRecord)
+ visitListPool.cache.Remove(el)
+ vr.store = make(map[*PredictionContext]*PredictionContext)
+ }
+ if collectStats {
+ Statistics.AddJStatRec(vr.stats)
+ }
+ return vr
+}
+
+func (vr *VisitRecord) Release() {
+ vr.len = 0
+ vr.store = nil
+ if collectStats {
+ vr.stats.MaxSize = 0
+ vr.stats.CurSize = 0
+ vr.stats.Gets = 0
+ vr.stats.GetHits = 0
+ vr.stats.GetMisses = 0
+ vr.stats.GetHashConflicts = 0
+ vr.stats.GetNoEnt = 0
+ vr.stats.Puts = 0
+ vr.stats.PutHits = 0
+ vr.stats.PutMisses = 0
+ vr.stats.PutHashConflicts = 0
+ vr.stats.MaxSlotSize = 0
+ }
+ visitListPool.lock.Lock()
+ visitListPool.cache.PushBack(vr)
+ visitListPool.lock.Unlock()
+}
+
+func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Gets++
+ }
+ v := vr.store[k]
+ if v != nil {
+ if collectStats {
+ vr.stats.GetHits++
+ }
+ return v, true
+ }
+ if collectStats {
+ vr.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Puts++
+ }
+ vr.store[k] = v
+ vr.len++
+ if collectStats {
+ vr.stats.CurSize = vr.len
+ if vr.len > vr.stats.MaxSize {
+ vr.stats.MaxSize = vr.len
+ }
+ }
+ return v, false
+}
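
The collection types above all follow one pattern: hash to a bucket, scan the bucket for an equal key, append on a miss, with statistics gathering compiled in behind `collectStats`. A minimal standalone sketch of that bucket discipline (hypothetical names, not part of the vendored runtime):

```go
package main

import "fmt"

type entry struct{ k, v string }

// bucketMap mirrors the JMap pattern: the hash selects a slot, a linear
// scan of the slot resolves collisions via key equality, and a miss appends.
type bucketMap struct {
	store map[int][]entry
	hash  func(string) int
}

// put returns the existing value and true on a hit, like JMap.Put.
func (m *bucketMap) put(k, v string) (string, bool) {
	h := m.hash(k)
	for _, e := range m.store[h] {
		if e.k == k {
			return e.v, true
		}
	}
	m.store[h] = append(m.store[h], entry{k, v})
	return v, false
}

func main() {
	m := &bucketMap{store: map[int][]entry{}, hash: func(s string) int { return len(s) }}
	m.put("ab", "first")
	v, hit := m.put("cd", "second") // same slot as "ab" (length-2 hash) but no key match
	fmt.Println(v, hit)             // prints: second false
}
```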
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
new file mode 100644
index 000000000..3c7896a91
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+	// The goal of all lexer rules/methods is to create a token object.
+	// This is an instance variable as multiple rules may collaborate to
+	// create a single token. NextToken will return this object after
+	// Matching lexer rule(s). If you subclass to allow multiple token
+	// emissions, then set this to the last token to be Matched or
+	// something non-nil so that the auto token emit mechanism will not
+	// emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+	// The line on which the first character of the token resides
+	lexer.TokenStartLine = -1
+
+	// The character position of the first character within the line
+	lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+	// The channel number for the current token
+	lexer.channel = TokenDefaultChannel
+
+	// The token type for the current token
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+	// You can set the text for the current token to override what is in
+	// the input char buffer. Use SetText() or set this instance variable
+	// directly.
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+//goland:noinspection GoUnusedConst
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) Reset() {
+	// whack the Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
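+// safeMatch invokes the interpreter's Match and converts a panicking
+// RecognitionException into an error report plus recovery, returning
+// LexerSkip so that NextToken can carry on with the next candidate token.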
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// NextToken returns a token from the lexer input source, i.e., it Matches a token on the source char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+
+ ttype := b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+}
+
+// Skip instructs the lexer to Skip creating a token for the current lexer rule
+// and look for another token. [NextToken] knows to keep looking when
+// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
+// will be in force.
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+// PushMode saves the current lexer mode so that it can be restored later
+// (see [PopMode]), then sets the current lexer mode to the supplied mode m.
+func (b *BaseLexer) PushMode(m int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
+// return to.
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.Reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// EmitToken by default does not support multiple emits per [NextToken] invocation
+// for efficiency reasons. Subclass and override this func, [NextToken],
+// and [GetToken] (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// Emit is the standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom [Token] objects or provide a new factory.
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+// EmitEOF emits an EOF token. By default, this is the last token emitted
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// GetCharIndex returns the index of the current character of lookahead
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// GetText returns the text Matched so far for the current token or any text override.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+// SetText sets the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+// GetATN returns the ATN used by the lexer.
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// GetAllTokens returns a list of all [Token] objects in input char stream.
+// Forces a load of all tokens that can be made from the input char stream.
+//
+// Does not include EOF token.
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+		return "<EOF>"
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Recover handles a recognition error. A lexer can normally Match any char
+// in its vocabulary after Matching a token, so here we do the easy thing and
+// just kill a character and hope it all works out. You can instead use the
+// rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+//
+// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
+// a character that makes no sense to the recognizer.
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
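
The NextToken loop above is the contract token consumers build on. As a hedged usage sketch (the helper name is illustrative, not part of this file), draining any Lexer until EOF looks like:

```go
package tokendump

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// DrainTokens pulls tokens from any antlr.Lexer until EOF, mirroring what
// BaseLexer.GetAllTokens does, but with the EOF check made explicit.
func DrainTokens(lex antlr.Lexer) {
	for tok := lex.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lex.NextToken() {
		fmt.Printf("type=%d text=%q\n", tok.GetTokenType(), tok.GetText())
	}
}
```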
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
new file mode 100644
index 000000000..eaa7393e0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ // LexerActionTypeChannel represents a [LexerChannelAction] action.
+ LexerActionTypeChannel = 0
+
+ // LexerActionTypeCustom represents a [LexerCustomAction] action.
+ LexerActionTypeCustom = 1
+
+ // LexerActionTypeMode represents a [LexerModeAction] action.
+ LexerActionTypeMode = 2
+
+ // LexerActionTypeMore represents a [LexerMoreAction] action.
+ LexerActionTypeMore = 3
+
+ // LexerActionTypePopMode represents a [LexerPopModeAction] action.
+ LexerActionTypePopMode = 4
+
+ // LexerActionTypePushMode represents a [LexerPushModeAction] action.
+ LexerActionTypePushMode = 5
+
+ // LexerActionTypeSkip represents a [LexerSkipAction] action.
+ LexerActionTypeSkip = 6
+
+ // LexerActionTypeType represents a [LexerTypeAction] action.
+ LexerActionTypeType = 7
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ Hash() int
+ Equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(_ Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, b.actionType)
+ return murmurFinish(h, 1)
+}
+
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
+ return b.actionType == other.getActionType()
+}
+
+// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
+//
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerSkipActionINSTANCE].
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+// String returns a string representation of the current [LexerSkipAction].
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+func (b *LexerSkipAction) Equals(other LexerAction) bool {
+ return other.getActionType() == LexerActionTypeSkip
+}
+
+// LexerTypeAction implements the type lexer action by calling [Lexer.SetType]
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// LexerPushModeAction implements the pushMode lexer action by calling
+// [Lexer.pushMode] with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
+//
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling [Lexer.PopMode].
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+	lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+	return "popMode"
+}
+
+// LexerMoreAction implements the more lexer action by calling [Lexer.More].
+//
+// The more command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerMoreActionINSTANCE].
+type LexerMoreAction struct {
+	*BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+	l := new(LexerMoreAction)
+	l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+	return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling [Lexer.More].
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// LexerChannelAction implements the channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
+type LexerChannelAction struct {
+ *BaseLexerAction
+ channel int
+}
+
+// NewLexerChannelAction creates a channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
+// with a [LexerAction].
+//
+// Note: This class is only required for lexer actions for which
+// [LexerAction.isPositionDependent] returns true.
+//
+// The offset points into the input [CharStream], relative to
+// the token start index, at which the specified lexerAction should be
+// executed.
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.Hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
+ }
+}
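
The push/pop mode actions above ultimately drive the mode stack maintained by the lexer in lexer.go. A self-contained sketch of that stack discipline (illustrative only, outside the runtime):

```go
package main

import "fmt"

// modeStack reproduces the discipline behind PushMode/PopMode: push saves
// the current mode before switching, pop restores the most recent save.
type modeStack struct {
	mode  int
	stack []int
}

func (m *modeStack) push(newMode int) {
	m.stack = append(m.stack, m.mode)
	m.mode = newMode
}

func (m *modeStack) pop() int {
	if len(m.stack) == 0 {
		panic("Empty Stack") // matches BaseLexer.PopMode behavior
	}
	m.mode = m.stack[len(m.stack)-1]
	m.stack = m.stack[:len(m.stack)-1]
	return m.mode
}

func main() {
	m := &modeStack{mode: 0}      // LexerDefaultMode
	m.push(2)                     // e.g. enter a STRING mode
	m.push(3)                     // nested, e.g. an interpolation mode
	fmt.Println(m.pop(), m.pop()) // prints: 2 0
}
```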
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 000000000..dfc28c32b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// Represents an executor for a sequence of lexer actions which are traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link ATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(0)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+ l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
+
+ return l
+}
+
+// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
+// the input [LexerActionExecutor] followed by a specified
+// [LexerAction].
+// TODO: This does not match the Java code
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// [LexerAction.isPositionDependent] returns true, it calls
+// [IntStream.Seek] on the input [CharStream] to set the input
+// position to the end of the current token. This behavior provides
+// for efficient [DFA] representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the [ATN], the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the [DFA] representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same Length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this instance.
+//
+// The offset is assigned to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// The func returns a [LexerActionExecutor] that stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
+ updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ }
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// Execute the actions encapsulated by this executor within the context of a
+// particular {@link Lexer}.
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When this method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
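
The key idea behind fixOffsetBeforeMatch is that offsets are stored relative to the token start index, so one executor encoding can be shared by every token of the same shape. A toy illustration of the relative-offset arithmetic (the numbers are made up):

```go
package main

import "fmt"

func main() {
	// Suppose a rule runs a position-dependent action 3 runes into the
	// token. Storing the offset relative to the token start lets the same
	// LexerIndexedCustomAction-style encoding serve tokens that begin at
	// different absolute input indexes.
	relOffset := 3
	for _, tokenStart := range []int{10, 57} {
		fmt.Println("execute the action after seeking to", tokenStart+relOffset)
	}
}
```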
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
new file mode 100644
index 000000000..fe938b025
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
@@ -0,0 +1,677 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache *JPCMap2
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := &LexerATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+	// DFA did not have a previous accept state. In that case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+
+ // line number 1..n within the input
+ l.Line = 1
+
+ // The index of the character relative to the beginning of the line
+ // 0..n-1
+ l.CharPositionInLine = 0
+
+ l.mode = LexerDefaultMode
+
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ var s0 *DFAState
+ l.atn.stateMu.RLock()
+ s0 = dfa.getS0()
+ l.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, s0)
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure, suppressEdge)
+
+ predict := l.execATN(input, next)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-Length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+ // it's configuration set there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+		// If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+ s = target // flip current DFA target becomes new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ l.atn.edgeMu.RLock()
+ defer l.atn.edgeMu.RUnlock()
+ if s.getEdges() == nil {
+ return nil
+ }
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if runtimeConfig.lexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
+// computed state and corresponding edge to the [DFA].
+//
+// The func returns the computed target [DFA] state for the given input symbol t.
+// If this does not lead to a valid [DFA] state, this method
+// returns ATNSimulatorError.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+			// we got nowhere on t, but don't throw out this knowledge; it'd
+			// cause a fail-over from DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
+// we can reach upon input t.
+//
+// Parameter reach is a return parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
+	// This is used to skip processing for configs which have a lower priority
+	// than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.configs {
+ currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
+ if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := t == TokenEOF
+ config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+					// any remaining configs for this alt have a lower priority
+ // than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
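+// computeStartState builds the initial configuration set for the given start
+// state by running closure over the target of each outgoing transition.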
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// closure since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from this config, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// The func returns true if an accept state is reached.
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
+ configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
+
+ var cfg *ATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+		// we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+ // l predicate mechanism is not adding DFA states that see
+		// this predicate mechanism is not adding DFA states that see
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+		// states reached by traversing predicates. Since this is when we
+		// test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.hasSemanticContext = true
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+ // isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// evaluatePredicate evaluates a predicate specified in the lexer.
+//
+// If speculative is true, this method was called before
+// [consume] for the Matched character. This method should call
+// [consume] before evaluating the predicate to ensure position
+// sensitive values, including [GetText], [GetLine],
+// and [GetColumn], properly reflect the current
+// lexer state. This method should restore input and the simulator
+// to the original state before returning, i.e. undo the actions made by the
+// call to [Consume].
+//
+// The func returns true if the specified predicate evaluates to true.
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+		// Leading up to this call, ATNConfigSet.hasSemanticContext is used as a
+		// marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to reSynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ //
+ suppressEdge := cfgs.hasSemanticContext
+ cfgs.hasSemanticContext = false
+ to = l.addDFAState(cfgs, true)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ l.atn.edgeMu.Lock()
+ defer l.atn.edgeMu.Unlock()
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
+
+// Add a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState *ATNConfig
+
+ for _, cfg := range configs.configs {
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ dfa := l.decisionToDFA[l.mode]
+
+ l.atn.stateMu.Lock()
+ defer l.atn.stateMu.Unlock()
+ existing, present := dfa.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
+ proposed = existing
+ } else {
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.Len()
+ configs.readOnly = true
+ configs.configLookup = nil // Not needed now
+ proposed.configs = configs
+ dfa.Put(proposed)
+ }
+ if !suppressEdge {
+ dfa.setS0(proposed)
+ }
+ return proposed
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// GetText returns the text Matched so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
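+
+// An illustrative sketch (editorial, not part of the upstream sources) of the
+// mapping implemented by GetTokenName, where sim is assumed to be a
+// *LexerATNSimulator:
+//
+//	name := sim.GetTokenName(int('a')) // "'a'"
+//	eof := sim.GetTokenName(-1)        // "EOF"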
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 000000000..4955ac876
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+const (
+ // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
+ // a predicate during analysis if seeThruPreds == false, i.e. when
+ // semantic predicates are treated as opaque rather than being
+ // "seen through" during lookahead computation.
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// getDecisionLookahead calculates the SLL(1) expected lookahead set for each
+// outgoing transition of an [ATNState]. The returned array has one element
+// for each outgoing transition in s. If the closure from transition
+// i leads to a semantic predicate before matching a symbol, the
+// element at index i of the result will be nil.
+//
+// Parameter s is the ATN state. The func returns the expected symbols for
+// each outgoing transition of s.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
+
+ // Wipe out lookahead for this alternative if we found nothing,
+ // or we had a predicate when seeThruPreds is false
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+// Look computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and the end of the rule containing
+// s is reached, [EPSILON] is added to the result set.
+//
+// If ctx is not nil and the end of the outermost rule is
+// reached, [EOF] is added to the result set.
+//
+// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the complete parser context, or nil if the context
+// should be ignored
+//
+// The func returns the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ var lookContext *PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
+ NewBitSet(), true, true)
+ return r
+}
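+
+// A minimal usage sketch of Look (editorial, not part of the upstream
+// sources); atn, someState and ruleCtx are assumed to be in hand:
+//
+//	la := NewLL1Analyzer(atn)
+//	follow := la.Look(someState, nil, ruleCtx)
+//	if follow.contains(TokenEOF) {
+//		// the end of input can follow someState in ruleCtx
+//	}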
+
+// look1 and look2 compute the set of tokens that can follow s in the [ATN] in
+// the specified ctx.
+//
+// If ctx is nil and stopState or the end of the
+// rule containing s is reached, [TokenEpsilon] is added to
+// the result set. If ctx is not nil and addEOF is
+// true and stopState or the end of the outermost rule is
+// reached, [TokenEOF] is added to the result set.
+//
+// Parameter s is the ATN state, and stopState is the ATN state to stop at.
+// stopState can be a [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the outer context, or nil if the outer context should
+// not be used.
+//
+// Parameter look is the result lookahead set.
+//
+// Parameter lookBusy is a set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass a
+// new JStore for this argument.
+//
+// Parameter calledRuleStack is a set used for preventing left recursion in
+// the ATN from causing a stack overflow. Outside code should pass
+// NewBitSet() for this argument.
+//
+// Parameter seeThruPreds is true to treat semantic predicates as
+// implicitly true and "see through them", otherwise false
+// to treat semantic predicates as opaque and add [LL1AnalyzerHitPred] to the
+// result if one is encountered.
+//
+// Parameter addEOF adds [TokenEOF] to the result if the end of the
+// outermost context is reached. This parameter has no effect if ctx is nil.
+
+func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+ }
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx.pcType != PredictionContextEmpty {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
new file mode 100644
index 000000000..923c7b52c
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
@@ -0,0 +1,47 @@
+//go:build !antlr.stats
+
+package antlr
+
+// This file is compiled when the build configuration antlr.stats is not enabled,
+// which allows the compiler to optimize out all of the statistics-gathering code.
+const collectStats = false
+
+// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
+type goRunStats struct {
+}
+
+var Statistics = &goRunStats{}
+
+func (s *goRunStats) AddJStatRec(_ *JStatRec) {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) CollectionAnomalies() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Reset() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Report(dir string, prefix string) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func (s *goRunStats) Analyze() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+type statsOption func(*goRunStats) error
+
+func (s *goRunStats) Configure(options ...statsOption) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ return nil
+ }
+}
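+
+// Editorial note: because of the go:build constraint at the top of this file,
+// these no-op stubs are what is normally compiled. To compile the real
+// statistics collector instead, enable the tag when building, e.g.:
+//
+//	go build -tags antlr.stats ./...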
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go
new file mode 100644
index 000000000..fb57ac15d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go
@@ -0,0 +1,700 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// NewBaseParser creates a BaseParser containing all the parsing support code
+// to embed in parsers; essentially most of it is error-recovery support.
+//
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+
+ // The ParserRuleContext object for the currently executing rule.
+ // This is always non-nil during the parsing process.
+ p.ctx = nil
+
+ // Specifies whether the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+
+ // When setTrace(true) is called, a reference to the
+ // TraceListener is stored here, so it can be easily removed in a
+ // later call to setTrace(false). The listener itself is
+ // implemented as a parser listener, so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+
+ // The list of ParseTreeListener listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+
+ // The number of syntax errors reported during parsing. This value is
+ // incremented each time NotifyErrorListeners is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized [ATN] with
+// bypass alternatives.
+//
+// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
+//
+//goland:noinspection GoUnusedGlobalVariable
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state.
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match matches the current input symbol against ttype. If the symbol type
+// matches, [ErrorStrategy.ReportMatch] and [Consume] are
+// called to complete the match process.
+//
+// If the symbol type does not match,
+// [ErrorStrategy.RecoverInline] is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy.RecoverInline] is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext.AddErrorNode].
+//
+// Match returns the matched symbol, or nil if a recognition error was
+// recorded because the symbol did not match ttype and the error strategy
+// could not recover from the mismatched symbol.
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.HasError() {
+ return nil
+ }
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
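+
+// A hedged usage sketch (editorial; MyParserID is a hypothetical token type
+// constant from a generated parser):
+//
+//	tok := p.Match(MyParserID)
+//	if tok == nil {
+//		// a recognition error was recorded and inline recovery failed
+//	}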
+
+// MatchWildcard matches the current input symbol as a wildcard. If the symbol
+// type matches (i.e. has a value greater than 0), [ErrorStrategy.ReportMatch]
+// and [Consume] are called to complete the match process.
+//
+// If the symbol type does not match,
+// [ErrorStrategy.RecoverInline] is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy.RecoverInline] is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext.AddErrorNode].
+//
+// MatchWildcard returns the matched symbol.
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// AddParseListener registers listener to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parse is complete. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
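+
+// A minimal registration sketch (editorial; myListener is a hypothetical
+// ParseTreeListener implementation):
+//
+//	p.AddParseListener(myListener)
+//	defer p.RemoveParseListener(myListener)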
+
+// RemoveParseListener removes listener from the list of parse listeners.
+//
+// If listener is nil or has not been added as a parse
+// listener, this func does nothing.
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// lazily.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO - Implement this?
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//	ParseTree t = parser.expr()
+//	ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+//	    MyParser.RULE_expr)
+//	ParseTreeMatch m = p.Match(t)
+//	String id = m.Get("ID")
+//
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern string, patternRuleIndex int, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// GetPrecedence returns the precedence level for the topmost precedence
+// rule, or -1 if the parser context is not nested within a precedence rule.
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+// PushNewRecursionContext is like [EnterRule] but for recursive rules.
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// [ATN]. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed:
+//
+//	return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
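+
+// An illustrative sketch (editorial): an error strategy could use this to
+// check whether deleting the current token would repair the input:
+//
+//	if p.IsExpectedToken(p.GetTokenStream().LA(2)) {
+//		// single-token deletion would let the parse continue
+//	}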
+
+// GetExpectedTokens returns the set of input symbols which could follow the
+// current parser state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// GetRuleIndex gets a rule's index (i.e., the RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
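+
+// For example (editorial sketch), an error handler might log the stack,
+// assuming the strings package is imported at the call site:
+//
+//	stack := p.GetRuleInvocationStack(nil)
+//	fmt.Println("rule stack: " + strings.Join(stack, " <- "))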
+
+// GetDFAStrings returns a string representation of all decision DFAs, for debugging purposes.
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
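+
+// A quick debugging sketch (editorial): the value passed to SetTrace only
+// matters for its nil-ness, since a fresh TraceListener is installed anyway:
+//
+//	p.SetTrace(&TraceListener{})
+//	defer p.SetTrace(nil) // remove the tracer again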
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 000000000..ae2869692
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var ()
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of empty JStore instances.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambiguity detection?
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merging prediction contexts.
+ // Don't keep it around, as it wastes huge amounts of memory. [JPCMap]
+ // isn't synchronized, but we're OK since two threads shouldn't reuse the same
+ // parser/ATN-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
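+
+// For example (editorial sketch), a driver can trade full LL accuracy for
+// speed by switching the simulator to SLL prediction before parsing:
+//
+//	parser.GetInterpreter().SetPredictionMode(PredictionModeSLL)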
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache, as that makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well-formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If p is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+// - Add an edge from the previous DFA state to the potentially new DFA state, D,
+// upon the current symbol, but only if adding to the work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// getExistingTargetState gets an existing target state for an edge in the
+// DFA. If the target state for the edge has not yet been computed or is
+// otherwise not available, this method returns nil.
+//
+// previousD is the current DFA state and t is the next input symbol. The
+// func returns the existing target DFA state for the given input symbol
+// t, or nil if the target state for this edge is not already cached.
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// computeTargetState computes a target state for an edge in the DFA, and
+// attempts to add the computed state and corresponding edge to the DFA.
+//
+// dfa is the DFA, previousD is the current DFA state, and t is the next
+// input symbol.
+//
+// The func returns the computed target DFA state for the given input symbol
+// t. If t does not lead to a valid DFA state, this method
+// returns [ATNSimulatorError].
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+ // in SLL-only mode, we will stop at p state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+ // In this case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+ // contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+ // It can only have one alternative, so just add it to the result.
+ // A unique alternative among the configurations is handled by the
+ // next branch below.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+ // this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+ // already guaranteed to meet this condition whether or not it is
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// When lookToEndOfRule is true, this method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// configs.
+//
+// The func returns configs if all configurations in configs are in a
+// rule stop state, otherwise it returns a new configuration set containing only
+// the configurations from configs which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
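+//
+// A sketch of the check at the heart of transformation 2, mirroring the
+// second loop in the function body below:
+//
+//	ctx := statesFromAlt1[config.GetState().GetStateNumber()]
+//	if ctx != nil && ctx.Equals(config.GetContext()) {
+//		// eliminated: alt 1 reached the same state with the same context
+//	}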
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+ // In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNone
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a NoViableAltException in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if one or more semantically invalid but syntactically valid paths
+// exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panicking with a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// Pass in configs holding the ATN configurations which were valid immediately before
+// the ERROR state was reached, and outerContext as the initial parser context from the paper,
+// or the parser stack at the instant before prediction commences.
+//
+// The func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// splitAccordingToSemanticValidity walks the list of configurations and splits them according to
+// those that have preds evaluating to true/false. If a configuration has no pred, we assume
+// a true pred and include it in the succeeded set. Returns a pair of sets.
+//
+// A new set is created so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point for
+// prediction, which is where predicates need to evaluate.
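+//
+// Usage sketch (this is how getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule
+// above consumes the result):
+//
+//	cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+//	semValid, semInvalid := cfgs[0], cfgs[1]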
+
+type ATNConfigSetPair struct {
+ item0, item1 *ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
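+//
+// For example, a sketch of how a full-context caller consumes the result
+// (alts is a *BitSet; length and minValue are its methods):
+//
+//	alts := p.evalSemanticContext(predPredictions, outerContext, true)
+//	if alts.length() == 1 {
+//		return alts.minValue() // exactly one alternative survived its predicate
+//	}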
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
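+// closureCheckingStopState performs the closure walk iteratively with an
+// explicit work stack and visited set, which avoids deep recursion on large
+// grammars; closureCheckingStopStateRecursive below is an equivalent
+// recursive formulation.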
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // target fell off end of rule; mark resulting c as having dipped into
+ // outer context. We can't get here if incoming config was rule stop
+ // and we had context. Track how far we dip into outer context - it
+ // might come in handy, and we avoid evaluating context-dependent
+ // preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+ // that p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', '(' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+ // which points to p but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+ sb.WriteString("')
+ return sb.String()
+}
+
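+// getEpsilonTarget returns a new [ATNConfig] for the target of transition t,
+// or nil when t cannot be traversed as epsilon. ATOM, RANGE, and SET
+// transitions are treated as epsilon only when treatEOFAsEpsilon is set and
+// the transition Matches [TokenEOF].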
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("runtimeConfig from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ;, it has a [DFA]
+// state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative 2 has another way to continue, via
+//
+// [6|2|[]].
+//
+// The key is that we have a single state that has configs only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+// [1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.uniqueAlt != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.uniqueAlt)
+ } else {
+ conflictingAlts = configs.conflictingAlts
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+ // for i := 0; i < len(decs); i++ {
+ //  c := decs[i]
+ //  var trans = "no edges"
+ //  if len(c.state.GetTransitions()) > 0 {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.configs {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+// addDFAEdge adds an edge to the [DFA], if possible. This method calls
+// addDFAState to ensure the to state is present in the
+// DFA. If from is nil, or if t is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// Here dfa is the DFA to update, from is the source state for the edge,
+// t is the input symbol, and to is the target state for the edge.
+//
+// The func returns nil if to is nil; otherwise it returns the [DFAState]
+// produced by calling addDFAState for the to state.
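+//
+// Note the t+1 offset used when storing the edge below: it shifts
+// [TokenEOF] (-1) into slot 0 of the edge table, which is why the table is
+// sized maxTokenType+1+1.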
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+
+ existing, present := dfa.Get(d)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state will be added if not already there or we will be given back the existing state struct
+ // if it is present.
+ //
+ d.stateNumber = dfa.Len()
+ if !d.configs.readOnly {
+ d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+ d.configs.readOnly = true
+ d.configs.configLookup = nil
+ }
+ dfa.Put(d)
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer
+// so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
new file mode 100644
index 000000000..c249bc138
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+ InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+ return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+ // What context invoked this rule?
+ prc.parentCtx = parent
+
+ // What state invoked the rule associated with this context?
+ // The "return address" is the followState of invokingState.
+ // If parent is nil, this should be -1.
+ if parent == nil {
+ prc.invokingState = -1
+ } else {
+ prc.invokingState = invokingStateNumber
+ }
+
+ prc.RuleIndex = -1
+ // If we are debugging or building a parse tree for a Visitor,
+ // we need to track all of the tokens and rule invocations associated
+ // with this rule's context. This is empty for parsing w/o tree construction
+ // because we don't need to track the details about
+ // how we parse this rule.
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+ // The exception that forced this rule to return. If the rule successfully
+ // completed, this is nil.
+ prc.exception = nil
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
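+// GetText returns the combined text of all child nodes. This method only
+// considers tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of this
+// method.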
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+}
+
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+}
+
+// addTerminalNodeChild does not set the parent link; other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+ if prc.children != nil && i >= 0 && i < len(prc.children) {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// String prints out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). It prints just a node if this context is a leaf.
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
+
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+ if v == nil {
+ prc.parentCtx = nil
+ } else {
+ prc.parentCtx = v.(RuleContext)
+ }
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+ return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+ prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+ return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if the context of prc is empty.
+//
+// A context is empty if there is no invoking state, meaning nobody called the
+// current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+ return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of prc, or nil if prc has no parent
+// (i.e. it is the root of the parse tree).
+func (prc *BaseParserRuleContext) GetParent() Tree {
+ return prc.parentCtx
+}
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 000000000..c1b80cc1f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+const (
+ // BasePredictionContextEmptyReturnState represents $ in an array in full
+ // context mode; $ doesn't mean wildcard:
+ //
+ //	$ + x = [$,x]
+ //
+ // Here, $ = EmptyReturnState.
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+const (
+ PredictionContextEmpty = iota
+ PredictionContextSingleton
+ PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
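+//
+// Construction sketch (illustrative only; 42 is an arbitrary return state):
+//
+//	empty := NewEmptyPredictionContext()                      // represents $
+//	single := SingletonBasePredictionContextCreate(empty, 42) // one stack frame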
+type PredictionContext struct {
+ cachedHash int
+ pcType int
+ parentCtx *PredictionContext
+ returnState int
+ parents []*PredictionContext
+ returnStates []int
+}
+
+func NewEmptyPredictionContext() *PredictionContext {
+ nep := &PredictionContext{}
+ nep.cachedHash = calculateEmptyHash()
+ nep.pcType = PredictionContextEmpty
+ nep.returnState = BasePredictionContextEmptyReturnState
+ return nep
+}
+
+func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
+ pc := &PredictionContext{}
+ pc.pcType = PredictionContextSingleton
+ pc.returnState = returnState
+ pc.parentCtx = parent
+ if parent != nil {
+ pc.cachedHash = calculateHash(parent, returnState)
+ } else {
+ pc.cachedHash = calculateEmptyHash()
+ }
+ return pc
+}
+
+func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from EMPTY and non-empty. We merge EMPTY by using a nil parent and
+ // returnState == BasePredictionContextEmptyReturnState.
+ hash := murmurInit(1)
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ nec := &PredictionContext{}
+ nec.cachedHash = hash
+ nec.pcType = PredictionContextArray
+ nec.parents = parents
+ nec.returnStates = returnStates
+ return nec
+}
+
+func (p *PredictionContext) Hash() int {
+ return p.cachedHash
+}
+
+func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ // guard the assertion so a nil interface value cannot panic it
+ if other == nil {
+ return true
+ }
+ otherP, ok := other.(*PredictionContext)
+ return ok && (otherP == nil || otherP.isEmpty())
+ case PredictionContextSingleton:
+ return p.SingletonEquals(other)
+ case PredictionContextArray:
+ return p.ArrayEquals(other)
+ }
+ return false
+}
+
+func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
+ if o == nil {
+ return false
+ }
+ other := o.(*PredictionContext)
+ if other == nil || other.pcType != PredictionContextArray {
+ return false
+ }
+ if p.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(p.returnStates, other.returnStates) &&
+ slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
+ if other == nil {
+ return false
+ }
+ otherP := other.(*PredictionContext)
+ if otherP == nil {
+ return false
+ }
+
+ if p.cachedHash != otherP.Hash() {
+ return false // Can't be same if hash is different
+ }
+
+ if p.returnState != otherP.getReturnState(0) {
+ return false
+ }
+
+ // Both parents must be nil if one is
+ if p.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return p.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (p *PredictionContext) GetParent(i int) *PredictionContext {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return nil
+ case PredictionContextSingleton:
+ return p.parentCtx
+ case PredictionContextArray:
+ return p.parents[i]
+ }
+ return nil
+}
+
+func (p *PredictionContext) getReturnState(i int) int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates[i]
+ default:
+ return p.returnState
+ }
+}
+
+func (p *PredictionContext) GetReturnStates() []int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates
+ default:
+ return []int{p.returnState}
+ }
+}
+
+func (p *PredictionContext) length() int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return len(p.returnStates)
+ default:
+ return 1
+ }
+}
+
+func (p *PredictionContext) hasEmptyPath() bool {
+ switch p.pcType {
+ case PredictionContextSingleton:
+ return p.returnState == BasePredictionContextEmptyReturnState
+ }
+ return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (p *PredictionContext) String() string {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return "$"
+ case PredictionContextSingleton:
+ var up string
+
+ if p.parentCtx == nil {
+ up = ""
+ } else {
+ up = p.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if p.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(p.returnState)
+ }
+
+ return strconv.Itoa(p.returnState) + " " + up
+ case PredictionContextArray:
+ if p.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(p.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if p.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(p.returnStates[i])
+ if !p.parents[i].isEmpty() {
+ s = s + " " + p.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+ return s + "]"
+
+ default:
+ return "unknown"
+ }
+}
+
+func (p *PredictionContext) isEmpty() bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return true
+ case PredictionContextArray:
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return p.returnStates[0] == BasePredictionContextEmptyReturnState
+ default:
+ return false
+ }
+}
+
+func (p *PredictionContext) Type() int {
+ return p.pcType
+}
+
+func calculateHash(parent *PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+// predictionContextFromRuleContext converts a [RuleContext] tree to a [PredictionContext] graph.
+// It returns BasePredictionContextEMPTY if outerContext is empty or nil.
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
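+// merge combines two [PredictionContext] graphs, sharing structure where
+// possible: equal contexts are returned as-is, two Singleton contexts are
+// handled by mergeSingletons, and everything else is converted to array
+// contexts and merged by mergeArrays. When rootIsWildcard is true, an empty
+// context on either side is returned directly.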
+func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
+ return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if a.isEmpty() {
+ return a
+ }
+ if b.isEmpty() {
+ return b
+ }
+ }
+
+ // Convert either Singleton or Empty to arrays, so that we can merge them
+ //
+ ara := convertToArray(a)
+ arb := convertToArray(b)
+ return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
+}
+
+func convertToArray(pc *PredictionContext) *PredictionContext {
+ switch pc.Type() {
+ case PredictionContextEmpty:
+ return NewArrayPredictionContext([]*PredictionContext{}, []int{})
+ case PredictionContextSingleton:
+ return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
+ default:
+ // Already an array
+ }
+ return pc
+}
+
+// mergeSingletons merges two Singleton [PredictionContext] instances.
+//
+// The cases are:
+//
+//   - Stack tops equal and parents merge to the same graph: return the left
+//     graph.
+//   - Same stack top, parents differ: merge parents giving an array node, then
+//     the remainders of those graphs. A new root node is created to point to
+//     the merged parents.
+//   - Different stack tops pointing to the same parent: make an array node for
+//     the root where both elements in the root point to the same (original)
+//     parent.
+//   - Different stack tops pointing to different parents: make an array node
+//     for the root where each element points to the corresponding original
+//     parent.
+//
+// Here a is the first Singleton [PredictionContext], b is the second,
+// rootIsWildcard is true if this is a local-context merge (otherwise false
+// for a full-context merge), and mergeCache, when non-nil, caches merge
+// results.
+func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ return previous
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent.Equals(a.parentCtx) {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent.Equals(b.parentCtx) {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array.
+ // New joined parent so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent *PredictionContext
+ if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []*PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []*PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []*PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+}
+
+// mergeRoot handles the case where at least one of a or b is
+// [BasePredictionContextEMPTY]. In the following rules, the symbol $ is used
+// to represent [BasePredictionContextEMPTY].
+//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when rootIsWildcard is true:
+//
+//   - $ is a superset of any graph: return $.
+//   - $ and anything merges to $, so the merged parent is $: return left graph.
+//   - Special case of last merge if local context.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when rootIsWildcard is false.
+// All contexts must be kept: $ in an array is a special value (with a
+// nil parent).
+//
+// The parameters a and b are the two Singleton [PredictionContext] instances
+// to merge. rootIsWildcard is true if this is a local-context merge,
+// otherwise false to indicate a full-context merge.
+func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
+ if rootIsWildcard {
+ if a.pcType == PredictionContextEmpty {
+ return BasePredictionContextEMPTY // // + b =//
+ }
+ if b.pcType == PredictionContextEmpty {
+ return BasePredictionContextEMPTY // a +// =//
+ }
+ } else {
+ if a.isEmpty() && b.isEmpty() {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a.isEmpty() { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
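+
+// A condensed sketch of the rules implemented above, using $ for EMPTY
+// (informal notation, for illustration only):
+//
+//	rootIsWildcard == true:   $ + x = $,  x + $ = $
+//	rootIsWildcard == false:  $ + $ = $,  $ + x = [$,x],  x + $ = [$,x]
+//	any other combination:    nil, and the caller performs a full merge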
+
+// mergeArrays merges two Array [PredictionContext] instances, covering these cases:
+//
+//   - Different tops, different parents.
+//   - Shared top, same parents.
+//   - Shared top, different parents.
+//   - Shared top, all shared parents.
+//   - Equal tops, merge parents and reduce top to a Singleton
+//     [PredictionContext].
+//
+//goland:noinspection GoBoolExpressions
+func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.Put(a, b, pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
+ if M.Equals(a) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, a)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M.Equals(b) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, b)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(&mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.Put(a, b, M)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
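+
+// A hedged sketch of the sorted-payload union performed above (parents
+// elided, values illustrative only):
+//
+//	a.returnStates = [1, 3, 5]
+//	b.returnStates = [2, 3]
+//	merged         = [1, 2, 3, 5] // 3 appears once; its two parents are merged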
+
+// combineCommonParents makes a pass over all M parents and merges any that are Equals().
+// Note that we pass a pointer to the slice as we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+ uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+ for p := 0; p < len(*parents); p++ {
+ parent := (*parents)[p]
+ _, _ = uniqueParents.Put(parent)
+ }
+ for q := 0; q < len(*parents); q++ {
+ pc, _ := uniqueParents.Get((*parents)[q])
+ (*parents)[q] = pc
+ }
+}
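+
+// For example (a hedged sketch; p1, p2, and p3 are illustrative contexts
+// where p2.Equals(p1) but p2 != p1):
+//
+//	parents := []*PredictionContext{p1, p2, p3}
+//	combineCommonParents(&parents)
+//	// parents is now [p1, p1, p3]: equal parents collapse to one pointer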
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+ if context.isEmpty() {
+ return context
+ }
+ existing, present := visited.Get(context)
+ if present {
+ return existing
+ }
+
+ existing, present = contextCache.Get(context)
+ if present {
+ visited.Put(context, existing)
+ return existing
+ }
+ changed := false
+ parents := make([]*PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || !parent.Equals(context.GetParent(i)) {
+ if !changed {
+ parents = make([]*PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited.Put(context, context)
+ return context
+ }
+ var updated *PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited.Put(updated, updated)
+ visited.Put(context, updated)
+
+ return updated
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 000000000..25dfb11e8
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+ cachedHash: calculateEmptyHash(),
+ pcType: PredictionContextEmpty,
+ returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+type PredictionContextCache struct {
+ cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ return &PredictionContextCache{
+ cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+ }
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
+ if ctx.isEmpty() {
+ return BasePredictionContextEMPTY
+ }
+
+ // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
+ // the same pointer), otherwise it will add the new entry and return that.
+ //
+ existing, present := p.cache.Get(ctx)
+ if present {
+ return existing
+ }
+ p.cache.Put(ctx, ctx)
+ return ctx
+}
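+
+// A minimal usage sketch (ctxA and ctxB are illustrative contexts):
+//
+//	cache := NewPredictionContextCache()
+//	c1 := cache.add(ctxA)
+//	c2 := cache.add(ctxB) // if ctxB.Equals(ctxA), then c2 == c1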
+
+func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
+ pc, exists := p.cache.Get(ctx)
+ return pc, exists
+}
+
+func (p *PredictionContextCache) length() int {
+ return p.cache.Len()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
new file mode 100644
index 000000000..3f85a6a52
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ // PredictionModeSLL represents the SLL(*) prediction mode.
+ // This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the SLL prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful LL prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+
+ // PredictionModeLL represents the LL(*) prediction mode.
+ // This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+
+ // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
+ // with exact ambiguity detection.
+ //
+ // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
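+
+// A hedged usage sketch: a generated parser exposes its [ParserATNSimulator]
+// via its Interpreter field, on which the prediction mode can be selected
+// ("MyParser" and the token stream are illustrative names):
+//
+//	p := NewMyParser(stream)
+//	p.Interpreter.SetPredictionMode(PredictionModeSLL)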
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+// {1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+//	s : (ID | ID ID?) ';' ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node because alternative two has another way to continue, via
+// via
+//
+// [6|2|[]]
+//
+// It also lets us continue for this rule:
+//
+// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// # Pure SLL Parsing
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// # Predicates in SLL+LL Parsing
+//
+// SLL decisions don't evaluate predicates until after they reach [DFA] stop
+// states because they need to create the [DFA] cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation, so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, [ATNConfigSet] combines stack contexts but not
+// semantic predicate contexts, so we might see two configurations like the
+// following:
+//
+// (s, 1, x, {}), (s, 1, x', {p})
+//
+// Before testing these configurations against others, we have to merge
+// x and x' (without modifying the existing configurations).
+// For example, we test (x+x')==x” when looking for conflicts in
+// the following configurations:
+//
+// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})
+//
+// If the configuration set has predicates (as indicated by
+// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
+// the configurations to strip out all the predicates so that a standard
+// [ATNConfigSet] will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
+
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input, so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL; it costs more time
+ // since we'll often fail over anyway.
+ if configs.hasSemanticContext {
+ // dup configs, tossing out semantic predicates
+ dup := NewATNConfigSet(false)
+ for _, c := range configs.configs {
+
+ // NewATNConfig({semanticContext:}, c)
+ c = NewATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar predicates
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
+func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// the func returns true if all configurations in configs are in a
+// [RuleStopState]
+func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
+
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
+//
+// Can we stop looking ahead during [ATN] simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations 'C', into
+// conflicting subsets (s, _, ctx, _) and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical [ATNConfig].state and [ATNConfig].context values
+// but a different [ATNConfig].alt value, e.g.
+//
+// (s, i, ctx, _)
+//
+// and
+//
+// (s, j, ctx, _) ; for i != j
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// A_s,ctx = {i | (s, i, ctx, _)}
+//
+// for each configuration in C holding s and ctx fixed.
+//
+// Or in pseudo-code:
+//
+// for each configuration c in C:
+//	map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+//
+// The values in map are the set of
+//
+// A_s,ctx
+//
+// sets.
+//
+// If
+//
+// |A_s,ctx| = 1
+//
+// then there is no conflict associated with s and ctx.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// further lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// # Conflicting Configs
+//
+// Two configurations:
+//
+// (s, i, x) and (s, j, x')
+//
+// conflict when i != j but x = x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and x'.
+//
+// Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or x'.
+// If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j > i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x', which lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// # Continue/Stop Rule
+//
+// Continue if the union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives,
+//
+// [i for (_, i, _)]
+//
+// tells us which alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// Cases
+//
+// - no conflicts and more than 1 alternative in set => continue
+// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
+// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set
+// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
+// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
+// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
+// {1} ∪ {2} = {1,2} => continue
+// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
+// {1} ∪ {3} = {1,3} => continue
+//
+// # Exact Ambiguity Detection
+//
+// If all states report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set:
+//
+// |A_i| > 1
+//
+// and
+//
+// A_i = A_j ; for all i, j
+//
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+//
+// A={{1,2}, {1,3}}
+//
+// then regular LL prediction would terminate because the resolved set is {1}.
+// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like:
+//
+// A={{1,2}}
+//
+// or
+//
+// {{1,2},{1,2}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
+// than one alternative.
+//
+// The func returns true if every [BitSet] in altsets has
+// [BitSet].cardinality cardinality > 1
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
+// exactly one alternative.
+//
+// The func returns true if altsets contains at least one [BitSet] with
+// [BitSet].cardinality cardinality 1
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
+// more than one alternative.
+//
+// The func returns true if altsets contains a [BitSet] with
+// [BitSet].cardinality cardinality > 1, otherwise false
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
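+
+// For example (hedged, informal set notation for the BitSets):
+//
+//	altsets = [{1,2}, {3}] -> true  ({1,2} holds more than one alt)
+//	altsets = [{1}, {2}]   -> false (every subset is a singleton)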
+
+// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
+//
+// The func returns true if every member of altsets is equal to the others.
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+// PredictionModegetUniqueAlt returns the unique alternative predicted by all
+// alternative subsets in altsets, a collection of alternative subsets.
+// If no such alternative exists, this method returns [ATNInvalidAltNumber].
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each [BitSet]
+// in altsets, being the set of represented alternatives in altsets.
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+//
+// for each configuration c in configs:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
+
+ for _, c := range configs.configs {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
+//
+// for each configuration c in configs:
+//	map[c.ATNConfig.state] U= c.ATNConfig.alt
+func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.configs {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
+// if there is one.
+//
+// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
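+
+// For example (hedged, informal set notation for the BitSets):
+//
+//	[{1,2}, {1,3}] -> 1 (both subsets have minimum alt 1)
+//	[{1,2}, {2,3}] -> ATNInvalidAltNumber (minimums 1 and 2 differ)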
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
new file mode 100644
index 000000000..2e0b504fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+ HasError() bool
+ GetError() RecognitionException
+ SetError(RecognitionException)
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+ SynErr RecognitionException
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var tokenTypeMapCache = make(map[string]int)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) SetError(err RecognitionException) {
+ b.SynErr = err
+}
+
+func (b *BaseRecognizer) HasError() bool {
+ return b.SynErr != nil
+}
+
+func (b *BaseRecognizer) GetError() RecognitionException {
+ return b.SynErr
+}
+
+func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// GetRuleIndexMap gets a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+// GetTokenType gets the token type based upon its name
+func (b *BaseRecognizer) GetTokenType(_ string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// GetErrorHeader returns the error header, normally line/character position information.
+//
+// Can be overridden in sub structs embedding BaseRecognizer.
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+//
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+//
+// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of [ANTLRErrorStrategy] may provide a similar
+// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// Sempred embedding structs need to override this if there are sempreds or actions
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+ return true
+}
+
+// Precpred embedding structs need to override this if there are precedence
+// predicates that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
new file mode 100644
index 000000000..f2ad04793
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RuleContext is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack.
+//
+// We actually carry no information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the [ATN] submachine that invoked this. Contrast this with the s
+// pointer inside [ParserRuleContext] that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the struct
+// [ParserRuleContext], which embeds a RuleContext.
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
new file mode 100644
index 000000000..68cb9061e
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// SemanticContext is a tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction p1 && p2, or a sum of products p1 || p2.
+//
+// I have scoped the AND, OR, and Predicate subclasses of
+// [SemanticContext] within the scope of this outer "class".
+type SemanticContext interface {
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
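+
+// A hedged sketch of the identities the two combinators above implement
+// (p is an illustrative predicate; None is SemanticContextNone):
+//
+//	SemanticContextandContext(nil, p)  // == p
+//	SemanticContextandContext(None, p) // == p    (true && p = p)
+//	SemanticContextorContext(None, p)  // == None (true || p = true)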
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+// SemanticContextNone is the default [SemanticContext], which is semantically
+// equivalent to a predicate of the form {true}?.
+var SemanticContextNone = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
+ return false
+ }
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
+}
+
+func (p *PrecedencePredicate) Hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v SemanticContext) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// AND is a semantic context which is true whenever none of the contained
+// contexts is false.
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
+ if a == other {
+ return true
+ }
+ oa, ok := other.(*AND)
+ if !ok {
+ return false
+ }
+ // Differing operand counts cannot be equal, and checking first guards
+ // the indexed access below
+ if len(a.opnds) != len(oa.opnds) {
+ return false
+ }
+ for i, v := range oa.opnds {
+ if !a.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+}
+
+// The evaluation of predicates by this context is short-circuiting, but
+// unordered.
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) Hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (o *OR) Hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range o.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(o.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ // Strip the leading "&& " separator added by the loop above
+ return s[3:]
+ }
+
+ return s
+}
+
+// OR is a semantic context which is true whenever at least one of the
+// contained contexts is true.
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the highest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
+ if o == other {
+ return true
+ }
+ oo, ok := other.(*OR)
+ if !ok {
+ return false
+ }
+ // Differing operand counts cannot be equal, and checking first guards
+ // the indexed access below
+ if len(o.opnds) != len(oo.opnds) {
+ return false
+ }
+ for i, v := range oo.opnds {
+ if !o.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+}
+
+// The evaluation of predicates by this context is short-circuiting, but
+// unordered.
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ // Strip the leading "|| " separator added by the loop above
+ return s[3:]
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
new file mode 100644
index 000000000..70c0673a0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
@@ -0,0 +1,281 @@
+//go:build antlr.stats
+
+package antlr
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// This file allows the user to collect statistics about the runtime behavior of the ANTLR runtime. It is not enabled by default
+// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
+//
+
+// Tells various components to collect statistics - because it is only true when this file is included, it will
+// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
+const collectStats = true
+
+// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
+// It is exported so that it can be used by others to look for things that are not already looked for in the
+// runtime statistics.
+type goRunStats struct {
+
+ // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
+ // during a run. It is exported so that it can be used by others to look for things that are not already looked for
+ // within this package.
+ //
+ jStats []*JStatRec
+ jStatsLock sync.RWMutex
+ topN int
+ topNByMax []*JStatRec
+ topNByUsed []*JStatRec
+ unusedCollections map[CollectionSource]int
+ counts map[CollectionSource]int
+}
+
+const (
+ collectionsFile = "collections"
+)
+
+var (
+ Statistics = &goRunStats{
+ topN: 10,
+ }
+)
+
+type statsOption func(*goRunStats) error
+
+// Configure allows the statistics system to be configured as the user wants and override the defaults
+func (s *goRunStats) Configure(options ...statsOption) error {
+ for _, option := range options {
+ err := option(s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
+//
+// For example, if you want to see the top 20 collections by size, you can do:
+//
+// antlr.Statistics.Configure(antlr.WithTopN(20))
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ s.topN = topN
+ return nil
+ }
+}
+
+// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
+//
+// The function gathers and analyzes a number of statistics about any particular run of
+// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
+// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
+// especially useful in tracking down bugs or performance problems when an ANTLR user could
+// supply the output from this package, but cannot supply the grammar file(s) they are using, even
+// privately to the maintainers.
+//
+// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
+// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
+// extant unless the calling program is built with the antlr.stats tag like so:
+//
+// go build -tags antlr.stats .
+//
+// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
+// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
+// [Statistics.Report] function to report the statistics.
+//
+// Please forward any questions about this package to the ANTLR discussion groups on GitHub, or send them to
+// me [Jim Idle] directly at jimi@idle.ws
+//
+// [Jim Idle]: https:://github.com/jim-idle
+func (s *goRunStats) Analyze() {
+
+ // Look for anything that looks strange and record it in our local maps etc for the report to present it
+ //
+ s.CollectionAnomalies()
+ s.TopNCollections()
+}
+
+// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
+func (s *goRunStats) TopNCollections() {
+
+ // Let's sort the stat records by MaxSize
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].MaxSize > s.jStats[j].MaxSize
+ })
+
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByMax = append(s.topNByMax, s.jStats[i])
+ }
+
+ // Sort by the number of times used
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
+ })
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByUsed = append(s.topNByUsed, s.jStats[i])
+ }
+}
+
+// Report dumps an asciidoc formatted report of all the statistics collected during a run to the given dir output
+// path, which should represent a directory. Generated files will be prefixed with the given prefix, include a
+// type name such as `collections`, and be given a .adoc suffix.
+func (s *goRunStats) Report(dir string, prefix string) error {
+
+ isDir, err := isDirectory(dir)
+ switch {
+ case err != nil:
+ return err
+ case !isDir:
+ return fmt.Errorf("output directory `%s` is not a directory", dir)
+ }
+ s.reportCollections(dir, prefix)
+
+ // Clean out any old data in case the user forgets
+ //
+ s.Reset()
+ return nil
+}
+
+func (s *goRunStats) Reset() {
+ s.jStats = nil
+ s.topNByUsed = nil
+ s.topNByMax = nil
+}
+
+func (s *goRunStats) reportCollections(dir, prefix string) {
+ cname := filepath.Join(dir, ".asciidoctor")
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = f.WriteString(`// .asciidoctorconfig
+++++
+
+++++`)
+ _ = f.Close()
+
+	fname := filepath.Join(dir, prefix+"_"+collectionsFile+".adoc")
+ // If the file doesn't exist, create it, or append to the file
+ f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(f)
+ _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
+
+ _, _ = f.WriteString("== Summary\n")
+
+ if s.unusedCollections != nil {
+ _, _ = f.WriteString("=== Unused Collections\n")
+		_, _ = f.WriteString("Unused collections incur an allocation penalty that makes them candidates for either\n")
+		_, _ = f.WriteString(" removal or optimization. If a collection is allocated but never used, you should\n")
+		_, _ = f.WriteString(" consider removing it. If a collection is used, but only rarely,\n")
+		_, _ = f.WriteString(" you should consider lazy initialization to defer the allocation until it is\n")
+		_, _ = f.WriteString(" actually needed.\n\n")
+
+ _, _ = f.WriteString("\n.Unused collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+
+ for k, v := range s.unusedCollections {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+		_, _ = f.WriteString("|===\n\n")
+ }
+
+ _, _ = f.WriteString("\n.Summary of Collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+ for k, v := range s.counts {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
+ for _, c := range s.topNByMax {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
+ for _, c := range s.topNByUsed {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+}
+
+// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when the runtime is built with the
+// antlr.stats build tag.
+func (s *goRunStats) AddJStatRec(rec *JStatRec) {
+ s.jStatsLock.Lock()
+ defer s.jStatsLock.Unlock()
+ s.jStats = append(s.jStats, rec)
+}
+
+// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
+func (s *goRunStats) CollectionAnomalies() {
+ s.jStatsLock.RLock()
+ defer s.jStatsLock.RUnlock()
+ s.counts = make(map[CollectionSource]int, len(s.jStats))
+ for _, c := range s.jStats {
+
+		// Accumulate raw counts
+ //
+ s.counts[c.Source]++
+
+ // Look for allocated but unused collections and count them
+ if c.MaxSize == 0 && c.Puts == 0 {
+ if s.unusedCollections == nil {
+ s.unusedCollections = make(map[CollectionSource]int)
+ }
+ s.unusedCollections[c.Source]++
+ }
+ if c.MaxSize > 6000 {
+ fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
+ }
+ }
+
+}
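
For orientation, here is a minimal sketch of a program driving the statistics API added above; the output directory, prefix, and the placement of the parser run are invented for illustration, and the file only compiles when built with the `antlr.stats` tag:

```Go
// Hypothetical driver for the statistics API above.
// Build with: go build -tags antlr.stats .
package main

import (
	"log"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Widen the "top N" report tables from the default of 10.
	if err := antlr.Statistics.Configure(antlr.WithTopN(20)); err != nil {
		log.Fatal(err)
	}

	// ... run the generated lexer/parser here ...

	// Compute the anomaly and top-N summaries, then write the asciidoc report.
	// The output directory must already exist; Report also resets the records.
	antlr.Statistics.Analyze()
	if err := antlr.Statistics.Report("stats-out", "myrun"); err != nil {
		log.Fatal(err)
	}
}
```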
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
new file mode 100644
index 000000000..4d9eb94e5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
@@ -0,0 +1,23 @@
+package antlr
+
+// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
+// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
+// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
+// for ideas on what can be gleaned from these statistics about collections.
+type JStatRec struct {
+ Source CollectionSource
+ MaxSize int
+ CurSize int
+ Gets int
+ GetHits int
+ GetMisses int
+ GetHashConflicts int
+ GetNoEnt int
+ Puts int
+ PutHits int
+ PutMisses int
+ PutHashConflicts int
+ MaxSlotSize int
+ Description string
+ CreateStack []byte
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go
new file mode 100644
index 000000000..9670efb82
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+
+ String() string
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+	column     int // the character position within the line at which the token occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
+ //
+ // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+ TokenDefaultChannel = 0
+
+	// TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
+ //
+ // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := &CommonToken{
+ BaseToken: BaseToken{
+ source: source,
+ tokenType: tokenType,
+ channel: channel,
+ start: start,
+ stop: stop,
+ tokenIndex: -1,
+ },
+ }
+
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty TokenSourceCharStreamPair is used as the default value of source for
+// tokens that do not have a source.
+
+// clone makes a copy of this [CommonToken]. The copy shares a reference to the
+// text field and to the [TokenSourceCharStreamPair] stored in source, and copies
+// the token index, line, column, and text from the original.
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+	} else {
+		txt = "<no text>"
+	}
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
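
To make the String layout concrete: a token at index 5 spanning input characters 21..25, with text `hello`, type 12, the default channel, line 3 and column 4 (all values invented for illustration) renders as:

```
[@5,21:25='hello',<12>,3:4]
```

A token on a non-default channel would add a `,channel=N` segment between the `<type>` and the line:column pair.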
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
new file mode 100644
index 000000000..a3f36eaa6
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
new file mode 100644
index 000000000..bf4ff6633
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+ Reset()
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 000000000..ccf59b465
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,662 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+//
+// Useful for rewriting a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily - only if you convert the buffer to a string with
+// GetText. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the GetText method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal string rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream index will return the same value before and after any GetText call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// GetText halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at GetText-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index i does not change the index values for tokens i+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example (in Java):
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t,u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file - all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+const (
+ DefaultProgramName = "default"
+ ProgramInitSize = 100
+ MinTokenIndex = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+	// Current index of rewrites list
+	instructionIndex int
+	// Token buffer index
+	index int
+	// Substitution text
+	text string
+	// Actual operation name
+	opName string
+	// Pointer to token stream
+	tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instructionIndex
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.opName
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instructionIndex = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.opName = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+	return fmt.Sprintf("<%s@%d:\"%s\">",
+		op.opName,
+		op.index,
+		op.text,
+	)
+
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ opName: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// InsertAfterOp distinguishes between insert after and insert before so that the
+// "insert after" instructions are executed first, and then the "insert before"
+// instructions at the same index. "Insert after" is implemented as "insert before index+1".
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+	return &InsertAfterOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:  index + 1,
+			text:   text,
+			opName: "InsertAfterOp",
+			tokens: stream,
+		},
+	}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// ReplaceOp replaces the range of tokens from index x..y (that is, (y-x)+1 tokens)
+// with the given text in a single instruction.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ opName: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ },
+ lastRewriteTokenIndexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+ is, ok := tsr.programs[programName]
+ if ok {
+ tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+ tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+	tsr.Rollback(programName, MinTokenIndex) // TODO: double-check this, because the lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+ tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+ tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+ tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+ tsr.Replace(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
+
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+ tsr.ReplaceToken(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+ i, ok := tsr.lastRewriteTokenIndexes[programName]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+ tsr.lastRewriteTokenIndexes[programName] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, ProgramInitSize)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ DefaultProgramName,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+ rewrites := tsr.programs[programName]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined, etc.
+//
+// Here are the cases:
+//
+// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+//	2. for any prior replace with same left boundary, combine this
+//	   insert with the replace and delete this insert.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
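+//
+// For example, under these rules InsertBefore(2, "A") followed by
+// Replace(2, 4, "B") collapses to the single operation Replace(2, 4, "AB"),
+// while Replace(2, 4, "B") followed by InsertBefore(3, "A") panics, because
+// the insert falls strictly inside the replaced range.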
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instructionIndex] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instructionIndex] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instructionIndex] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instructionIndex] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+		// hack to replicate inheritance via composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instructionIndex] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
+// max and min substitute for the generic built-ins that Go gained only in 1.21;
+// Go has no function overloading, so int versions are defined here.
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
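
The Java snippets in the file header map directly onto the Go API defined in this diff. A minimal sketch, where `NewMyLexer` stands in for a hypothetical ANTLR-generated lexer (the input string and token indexes are likewise invented):

```Go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("int x = 1;")
	lexer := NewMyLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill() // the rewriter only sees tokens already in the buffer

	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// generated\n") // queue an insert
	rewriter.ReplaceDefaultPos(2, "y")                // queue a replace
	// The token stream itself is never modified; the queued instructions
	// are applied lazily when the text is rendered.
	fmt.Println(rewriter.GetTextDefault())
}
```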
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
new file mode 100644
index 000000000..7b663bf84
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type TraceListener struct {
+ parser *BaseParser
+}
+
+func NewTraceListener(parser *BaseParser) *TraceListener {
+ tl := new(TraceListener)
+ tl.parser = parser
+ return tl
+}
+
+func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
+}
+
+func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
+ fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
+
+func (t *TraceListener) VisitTerminal(node TerminalNode) {
+ fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
+}
+
+func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
+ fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
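
The listener above is normally installed by the parser when tracing is turned on; as a sketch, wiring one up manually might look like this, assuming the usual generated-parser layout where `*antlr.BaseParser` is embedded and its `SetTrace` method is available:

```Go
import "github.com/antlr4-go/antlr/v4"

// enableTracing attaches a TraceListener so every rule entry/exit and
// consumed token is printed as the parse proceeds. Passing the embedded
// *antlr.BaseParser of a generated parser is the assumed usage here.
func enableTracing(p *antlr.BaseParser) {
	p.SetTrace(antlr.NewTraceListener(p))
}
```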
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go
new file mode 100644
index 000000000..313b0fc12
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go
@@ -0,0 +1,439 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions on
+// the other hand need to update the labels as they add transitions to the
+// states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
-`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
+`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
-See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accept a user-specified buffer.
## fxamacker/cbor
[ci](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
-[cover ≥96%](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[cover ≥97%](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
[codeql](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
[fuzzing](#fuzzing-and-code-coverage)
[go report card](https://goreportcard.com/report/github.com/fxamacker/cbor)
+[fuzzing](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
-Highlights
+ 🔎 Highlights
__🚀 Speed__
@@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili
__🗜️ Data Size__
-Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
__:jigsaw: Usability__
@@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
-By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
-
-Example decoding with encoding/gob 💥 fatal error (out of memory)
-
-```Go
-// Example of encoding/gob having "fatal error: runtime: out of memory"
-// while decoding 181 bytes.
-package main
-import (
- "bytes"
- "encoding/gob"
- "encoding/hex"
- "fmt"
-)
-
-// Example data is from https://github.com/golang/go/issues/24446
-// (shortened to 181 bytes).
-const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
- "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
- "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
- "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
- "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
- "303030303030303030303030303030303030303030303030303030303030" +
- "30"
-
-type X struct {
- J *X
- K map[string]int
-}
-
-func main() {
- raw, _ := hex.DecodeString(data)
- decoder := gob.NewDecoder(bytes.NewReader(raw))
-
- var x X
- decoder.Decode(&x) // fatal error: runtime: out of memory
- fmt.Println("Decoding finished.")
-}
-```
-
-
-
-
-
-`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
-decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
-
-Benchmark details
-
-Latest comparison used:
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
-- go test -bench=. -benchmem -count=20
-
-#### Prior comparisons
-
-| Codec | Speed (ns/op) | Memory | Allocs |
-| :---- | ------------: | -----: | -----: |
-| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
-| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
-| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
-| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
-
-- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
-- go1.19.6, linux/amd64, i5-13600K (DDR4)
-- go test -bench=. -benchmem -count=20
-
-
-
-
-
-### Smaller Encodings with Struct Tags
-
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
-
-Example encoding 3-level nested Go struct to 1 byte CBOR
-
-https://go.dev/play/p/YxwvfPdFQG2
-
-```Go
-// Example encoding nested struct (with omitempty tag)
-// - encoding/json: 18 byte JSON
-// - fxamacker/cbor: 1 byte CBOR
-package main
-
-import (
- "encoding/hex"
- "encoding/json"
- "fmt"
-
- "github.com/fxamacker/cbor/v2"
-)
-
-type GrandChild struct {
- Quux int `json:",omitempty"`
-}
-
-type Child struct {
- Baz int `json:",omitempty"`
- Qux GrandChild `json:",omitempty"`
-}
-
-type Parent struct {
- Foo Child `json:",omitempty"`
- Bar int `json:",omitempty"`
-}
-
-func cb() {
- results, _ := cbor.Marshal(Parent{})
- fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
-
- text, _ := cbor.Diagnose(results) // Diagnostic Notation
- fmt.Println("DN: " + text)
-}
-
-func js() {
- results, _ := json.Marshal(Parent{})
- fmt.Println("hex(JSON): " + hex.EncodeToString(results))
-
- text := string(results) // JSON
- fmt.Println("JSON: " + text)
-}
-
-func main() {
- cb()
- fmt.Println("-------------")
- js()
-}
-```
-
-Output (DN is Diagnostic Notation):
-```
-hex(CBOR): a0
-DN: {}
--------------
-hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
-JSON: {"Foo":{"Qux":{}}}
-```
-
-
-
-
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> 🔎 Benchmark details
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> 🔎 gob fatal error (out of memory) 💥 decoding 181 bytes
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> "bytes"
+> "encoding/gob"
+> "encoding/hex"
+> "fmt"
+> )
+>
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> "303030303030303030303030303030303030303030303030303030303030" +
+> "30"
+>
+> type X struct {
+> J *X
+> K map[string]int
+> }
+>
+> func main() {
+> raw, _ := hex.DecodeString(data)
+> decoder := gob.NewDecoder(bytes.NewReader(raw))
+>
+> var x X
+> decoder.Decode(&x) // fatal error: runtime: out of memory
+> fmt.Println("Decoding finished.")
+> }
+> ```
+>
+>
+>
+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty field when encoding
+- `omitzero`: omit zero-value field when encoding
+
+As a special case, struct field tag "-" omits the field.
+
+NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent the position of encoded array elements from changing. This allows the decoder to match encoded elements to their Go struct fields. A short sketch of these options follows below.
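
To make the first two options concrete, a small sketch (type, field names, and values invented): `toarray` drops field names from the encoding entirely, while `keyasint` replaces them with integer keys.

```Go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Record encodes as a fixed-position CBOR array with no field names.
type Record struct {
	_    struct{} `cbor:",toarray"`
	ID   uint64
	Name string
}

// Claims encodes as a CBOR map with small integer keys,
// as CWT-style protocols expect.
type Claims struct {
	Iss string `cbor:"1,keyasint"`
	Exp uint64 `cbor:"4,keyasint"`
}

func main() {
	r, _ := cbor.Marshal(Record{ID: 7, Name: "a"})
	c, _ := cbor.Marshal(Claims{Iss: "x", Exp: 1})
	fmt.Println(hex.EncodeToString(r)) // 82076161 -> [7, "a"]
	fmt.Println(hex.EncodeToString(c)) // a20161780401 -> {1: "x", 4: 1}
}
```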

-API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> 🔎 Encoding 3-level nested Go struct with omitempty
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+>
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json: 18 byte JSON
+> // - fxamacker/cbor: 1 byte CBOR
+>
+> package main
+>
+> import (
+> "encoding/hex"
+> "encoding/json"
+> "fmt"
+>
+> "github.com/fxamacker/cbor/v2"
+> )
+>
+> type GrandChild struct {
+> Quux int `json:",omitempty"`
+> }
+>
+> type Child struct {
+> Baz int `json:",omitempty"`
+> Qux GrandChild `json:",omitempty"`
+> }
+>
+> type Parent struct {
+> Foo Child `json:",omitempty"`
+> Bar int `json:",omitempty"`
+> }
+>
+> func cb() {
+> results, _ := cbor.Marshal(Parent{})
+> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+>
+> text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> fmt.Println("DN: " + text)
+> }
+>
+> func js() {
+> results, _ := json.Marshal(Parent{})
+> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+>
+> text := string(results) // JSON
+> fmt.Println("JSON: " + text)
+> }
+>
+> func main() {
+> cb()
+> fmt.Println("-------------")
+> js()
+> }
+> ```
+>
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+>
+>
+
## Quick Start
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
+> [!TIP]
+>
+> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> 🔎 More about tinygo feature branch
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing a needed feature.
+> - encoding error message can be different when encoding function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+>
+
+
### Key Points
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
@@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
-// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
-// but new funcs UnmarshalFirst and DiagnoseFirst do not.
+// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
+// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
```
-__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
-
-- Different CBOR libraries may use different default settings.
-- CBOR-based formats or protocols usually require specific settings.
-
-For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
+> [!IMPORTANT]
+> CBOR settings allow trade-offs between speed, security, encoding size, etc.
+>
+> - Different CBOR libraries may use different default settings.
+> - CBOR-based formats or protocols usually require specific settings.
+>
+> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
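
As a sketch, selecting that preset could look like the following; `CTAP2EncOptions` is one of the preset constructors in this library's API:

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// CTAP2 Canonical CBOR, as required by WebAuthn/FIDO2.
	em, err := cbor.CTAP2EncOptions().EncMode()
	if err != nil {
		panic(err)
	}
	b, err := em.Marshal(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", b) // a2616101616202: map keys in canonical order
}
```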
### Presets
@@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
### Struct Tags
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
+Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
+
+As a special case, struct field tag "-" omits the field.
+
+ 🔎 Example encoding with struct field tag "-"
+
+https://go.dev/play/p/aWEIFxd7InX
+
+```Go
+// https://github.com/fxamacker/cbor/issues/652
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
+type Entity struct {
+ _ struct{} `cbor:",toarray"`
+ ID uint64 `json:"id"`
+ Type string `cbor:"-" json:"typeOf"`
+ Name string `json:"name"`
+}
+
+func main() {
+ entity := Entity{
+ ID: 1,
+ Type: "int64",
+ Name: "Identifier",
+ }
+
+ c, _ := cbor.Marshal(entity)
+ diag, _ := cbor.Diagnose(c)
+ fmt.Printf("CBOR in hex: %x\n", c)
+ fmt.Printf("CBOR in edn: %s\n", diag)
+
+ j, _ := json.Marshal(entity)
+ fmt.Printf("JSON: %s\n", string(j))
+
+ fmt.Printf("JSON encoding is %d bytes\n", len(j))
+ fmt.Printf("CBOR encoding is %d bytes\n", len(c))
+
+ // Output:
+ // CBOR in hex: 82016a4964656e746966696572
+ // CBOR in edn: [1, "Identifier"]
+ // JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
+ // JSON encoding is 45 bytes
+ // CBOR encoding is 13 bytes
+}
+```
+
+
-Example encoding 3-level nested Go struct to 1 byte CBOR
+ 🔎 Example encoding 3-level nested Go struct to 1 byte CBOR
https://go.dev/play/p/YxwvfPdFQG2
@@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}
-Example using several struct tags
+ 🔎 Example using struct tag options

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
+Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
### CBOR Tags
@@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
-Example using TagSet and TagOptions
+ 🔎 Example using TagSet and TagOptions
```go
// Use signedCWT struct defined in "Decoding CWT" example.
@@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil {
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
// Marshal signedCWT with tag number.
-if data, err := cbor.Marshal(v); err != nil {
+if data, err := em.Marshal(v); err != nil {
return err
}
```
+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
+
+Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
+
+The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
+
+ 🔎 Example using Embedded JSON Tag for CBOR (tag 262)
+
+```go
+// https://github.com/fxamacker/cbor/issues/657
+
+package cbor_test
+
+// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
+// CBOR tag number 262 as "Embedded JSON Object" specified by the
+// document Embedded JSON Tag for CBOR:
+//
+// "Tag 262 can be applied to a byte string (major type 2) to indicate
+// that the byte string is a JSON Object. The length of the byte string
+// indicates the content."
+//
+// For more info, see Embedded JSON Tag for CBOR at:
+// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/fxamacker/cbor/v2"
+)
+
+// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
+const cborTagNumForEmbeddedJSON = 262
+
+// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
+// with tag number 262 and the tag content is a JSON object "embedded" as a
+// CBOR byte string (major type 2).
+type EmbeddedJSON struct {
+ any
+}
+
+func NewEmbeddedJSON(val any) EmbeddedJSON {
+ return EmbeddedJSON{val}
+}
+
+// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with
+// tag number 262, whose tag content is a JSON object "embedded" as a
+// CBOR byte string.
+func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
+ // Encode v to JSON object.
+ data, err := json.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create cbor.Tag representing a tagged CBOR data item.
+ tag := cbor.Tag{
+ Number: cborTagNumForEmbeddedJSON,
+ Content: data,
+ }
+
+ // Marshal to a tagged CBOR data item.
+ return cbor.Marshal(tag)
+}
+
+// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
+// The byte slice provided to this function must contain a single
+// tagged CBOR data item with tag number 262, and the tag content
+// must be a JSON object "embedded" as a CBOR byte string.
+func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
+ // Unmarshal tagged CBOR data item.
+ var tag cbor.Tag
+ if err := cbor.Unmarshal(b, &tag); err != nil {
+ return err
+ }
+
+ // Check tag number.
+ if tag.Number != cborTagNumForEmbeddedJSON {
+ return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
+ }
+
+ // Check tag content.
+ jsonData, isByteString := tag.Content.([]byte)
+ if !isByteString {
+ return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
+ }
+
+ // Unmarshal JSON object.
+ return json.Unmarshal(jsonData, v)
+}
+
+// MarshalJSON encodes EmbeddedJSON to a JSON object.
+func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.any)
+}
+
+// UnmarshalJSON decodes a JSON object.
+func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(b))
+ dec.UseNumber()
+ return dec.Decode(&v.any)
+}
+
+func Example_embeddedJSONTagForCBOR() {
+ value := NewEmbeddedJSON(map[string]any{
+ "name": "gopher",
+ "id": json.Number("42"),
+ })
+
+ data, err := cbor.Marshal(value)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("cbor: %x\n", data)
+
+ var v EmbeddedJSON
+ err = cbor.Unmarshal(data, &v)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%+v\n", v.any)
+ for k, v := range v.any.(map[string]any) {
+ fmt.Printf(" %s: %v (%T)\n", k, v, v)
+ }
+}
+```
+
+
+
+
### Functions and Interfaces
-Functions and interfaces at a glance
+ 🔎 Functions and interfaces at a glance
Common functions with the same API as `encoding/json`:
- `Marshal`, `Unmarshal`
@@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed.
Other useful functions:
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
-- `Wellformed` returns true if the the CBOR data item is well-formed.
+- `Wellformed` returns true if the CBOR data item is well-formed.
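A minimal sketch tying these helpers together (assuming the v2 API, where `Wellformed` reports an error for malformed input rather than literally returning a boolean):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Two CBOR data items back to back: the unsigned integer 1 (0x01)
	// followed by the text string "hi" (0x62 'h' 'i').
	data := []byte{0x01, 0x62, 'h', 'i'}

	// UnmarshalFirst decodes the first item and returns the remaining bytes.
	var n int
	rest, err := cbor.UnmarshalFirst(data, &n)
	if err != nil {
		panic(err)
	}
	fmt.Println(n)           // 1
	fmt.Printf("%x\n", rest) // 626869

	// Wellformed reports an error unless rest is a single well-formed item.
	if err := cbor.Wellformed(rest); err != nil {
		panic(err)
	}

	// Diagnose renders the remaining item in Extended Diagnostic Notation.
	edn, err := cbor.Diagnose(rest)
	if err != nil {
		panic(err)
	}
	fmt.Println(edn) // "hi"
}
```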
Interfaces identical or comparable to those in Go's `encoding` packages include:
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
@@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e.
## Status
-v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
+[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improves interoperability/transcoding between CBOR & JSON, refactors tests, and updates docs.
+- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
+- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
+- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
+
+v2.9.0 passed fuzz tests and is production quality.
+
+The minimum version of Go required to build:
+- v2.8.0 and newer releases require Go 1.20+.
+- v2.7.1 and older releases require Go 1.17+.
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
-### Prior Release
+### Prior Releases
+
+[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release that primarily adds the `omitzero` option to struct field tags and fixes bugs. It passed fuzz tests (billions of executions) and is production quality.
+
+[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
-v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
+[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after an extended beta: [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
__IMPORTANT__: 👉 Before upgrading from v2.4 or an older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes, including changes to error handling for extraneous data in `Unmarshal`, that should be reviewed before upgrading.
@@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
+
+
+## [1.35.0/0.57.0/0.11.0] 2025-03-05
+
+This release is the last to support [Go 1.22].
+The next release will require at least [Go 1.23].
+
+### Added
+
+- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
+- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6210)
+- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
+ The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`. (#6236)
+- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
+ The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`. (#6240)
+- Document the pitfalls of using `Resource` as a comparable type.
+ `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
+- Support [Go 1.24]. (#6304)
+- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
+ It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
+ Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317)
+
+### Changed
+
+- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
+ This allows metric names to keep their original delimiters (e.g. `.`), rather than replacing them with underscores.
+ This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198)
+
+### Fixed
+
+- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called, the passed `ctx` is canceled, and `SpanExporter.Shutdown` has not returned. (#6368)
+- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called, the passed `ctx` is canceled, and `SpanExporter.Export` has not returned. (#6369)
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
### Added
@@ -37,9 +92,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
-
-
-
## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
### Added
@@ -3185,7 +3237,9 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD
+[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
@@ -3275,6 +3329,7 @@ It contains api and sdk for trace and meter.
+[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 22a2e9dbd..7b8af585a 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -181,6 +181,18 @@ patterns in the spec.
For a deeper discussion, see
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+## Tests
+
+Each piece of functionality should be covered by tests.
+
+Performance-critical functionality should also be covered by benchmarks.
+
+- Pull requests adding performance-critical functionality
+should include `go test -bench` output in their description.
+- Pull requests changing performance-critical functionality
+should include [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
+output in their description (a minimal benchmark sketch follows below).
+
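As a concrete illustration of the evidence requested above, here is a hypothetical micro-benchmark; `sum` and `BenchmarkSum` are invented names, not part of this repository. Running it with `go test -bench=. -count=10 | tee new.txt` on both branches and then `benchstat old.txt new.txt` produces the comparison described in the second bullet.

```go
package sketch

import "testing"

// sum is an illustrative performance-critical function.
func sum(xs []int) int {
	total := 0
	for _, x := range xs {
		total += x
	}
	return total
}

// BenchmarkSum exercises sum over a fixed input so that
// `go test -bench` can report ns/op for it.
func BenchmarkSum(b *testing.B) {
	xs := make([]int, 1024)
	for i := range xs {
		xs[i] = i
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = sum(xs)
	}
}
```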
## Documentation
Each (non-internal, non-test) package must be documented using
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index a7f6d8cc6..226410d74 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -11,6 +11,10 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {}
GO = go
TIMEOUT = 60
+# User to run as in docker images.
+DOCKER_USER=$(shell id -u):$(shell id -g)
+DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile
+
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
@@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip
WORKDIR := /workdir
# The python image to use for the virtual environment.
-PYTHONIMAGE := python:3.11.3-slim-bullseye
+PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
# Run the python image with the current directory mounted.
-DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
# Create a virtual environment for Python tools.
$(PYTOOLS):
# The `--upgrade` flag is needed to ensure that the virtual environment is
# created with the latest pip version.
- @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
+ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip"
# Install python packages into the virtual environment.
$(PYTOOLS)/%: $(PYTOOLS)
- @$(DOCKERPY) $(PIP) install -r requirements.txt
+ @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
$(CODESPELL): PACKAGE=codespell
@@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO)
# Generate go.work file for local development.
.PHONY: go-work
go-work: $(CROSSLINK)
- $(CROSSLINK) work --root=$(shell pwd)
+ $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7
# Build
@@ -265,13 +269,30 @@ check-clean-work-tree:
exit 1; \
fi
+# The weaver docker image to use for semconv-generate.
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
-semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
+semconv-generate: $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
- [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
+ # Ensure the target directory for source code is available.
+ mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG}
+ # Note: We mount a home directory for downloading/storing the semconv repository.
+ # Weaver will automatically clean the cache when finished, but the directories will remain.
+ mkdir -p ~/.weaver
+ docker run --rm \
+ -u $(DOCKER_USER) \
+ --env HOME=/tmp/weaver \
+ --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
+ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
+ $(WEAVER_IMAGE) registry generate \
+ --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \
+ --templates=/home/weaver/templates \
+ --param tag=$(TAG) \
+ go \
+ /home/weaver/target
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index efec27890..8421cd7e5 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -1,9 +1,11 @@
# OpenTelemetry-Go
-[](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
[](https://pkg.go.dev/go.opentelemetry.io/otel)
[](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
+[](https://www.bestpractices.dev/projects/9996)
[](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
@@ -49,18 +51,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
+| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.22 | amd64 |
+| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.22 | 386 |
-| Linux | 1.23 | arm64 |
-| Linux | 1.22 | arm64 |
+| Ubuntu | 1.24 | arm64 |
+| Ubuntu | 1.23 | arm64 |
+| Ubuntu | 1.22 | arm64 |
+| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
+| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
| macOS | 1.22 | arm64 |
+| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
| Windows | 1.22 | amd64 |
+| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index ffa9b6125..1e13ae54f 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -5,17 +5,14 @@
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
-1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
-2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
-3. Run the `make semconv-generate ...` target from this repository.
+1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
+2. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
-export TAG="v1.21.0" # Change to the release version you are generating.
-export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
-docker pull otel/semconvgen:latest
-make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+export TAG="v1.30.0" # Change to the release version you are generating.
+make semconv-generate # Uses the exported TAG.
```
This should create a new sub-package of [`semconv`](./semconv).
@@ -130,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just
Bump the dependencies in the following Go services:
-- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkout`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`product-catalog`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
new file mode 100644
index 000000000..e4c4a753c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -0,0 +1,3 @@
+# This is a renovate-friendly source of Docker images.
+FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python
+FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
new file mode 100644
index 000000000..50802d5ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace Exporter
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
new file mode 100644
index 000000000..3c1a625c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Client manages connections to the collector, handles the
+// transformation of data into wire format, and the transmission of that
+// data to the collector.
+type Client interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Start should establish connection(s) to endpoint(s). It is
+ // called just once by the exporter, so the implementation
+ // does not need to worry about idempotence and locking.
+ Start(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Stop should close the connections. The function is called
+ // only once by the exporter, so the implementation does not
+ // need to worry about idempotence, but it may be called
+ // concurrently with UploadTraces, so proper
+ // locking is required. The function serves as a
+ // synchronization point - after the function returns, the
+ // process of closing connections is assumed to be finished.
+ Stop(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // UploadTraces should transform the passed traces to the wire
+ // format and send it to the collector. May be called
+ // concurrently.
+ UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
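As an illustration of how this contract can be satisfied, here is a hedged sketch of a no-op `Client` (the `dropClient` name is invented); such a stub can stand in for a real transport in tests:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// dropClient is a hypothetical no-op Client: it starts and stops cleanly
// and silently discards every batch of spans.
type dropClient struct{}

func (dropClient) Start(ctx context.Context) error { return nil }
func (dropClient) Stop(ctx context.Context) error  { return nil }
func (dropClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
	// A real client would transform and transmit protoSpans here.
	return nil
}

// Compile-time check that dropClient satisfies otlptrace.Client.
var _ otlptrace.Client = dropClient{}

func main() {
	ctx := context.Background()
	exp, err := otlptrace.New(ctx, dropClient{})
	if err != nil {
		panic(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
	fmt.Println("no-op exporter started")
}
```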
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
new file mode 100644
index 000000000..09ad5eadb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
new file mode 100644
index 000000000..3f0a518ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var errAlreadyStarted = errors.New("already started")
+
+// Exporter exports trace data in the OTLP wire format.
+type Exporter struct {
+ client Client
+
+ mu sync.RWMutex
+ started bool
+
+ startOnce sync.Once
+ stopOnce sync.Once
+}
+
+// ExportSpans exports a batch of spans.
+func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
+ protoSpans := tracetransform.Spans(ss)
+ if len(protoSpans) == 0 {
+ return nil
+ }
+
+ err := e.client.UploadTraces(ctx, protoSpans)
+ if err != nil {
+ return fmt.Errorf("traces export: %w", err)
+ }
+ return nil
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+ err := errAlreadyStarted
+ e.startOnce.Do(func() {
+ e.mu.Lock()
+ e.started = true
+ e.mu.Unlock()
+ err = e.client.Start(ctx)
+ })
+
+ return err
+}
+
+// Shutdown flushes all exports and closes all connections to the receiving endpoint.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ e.mu.RLock()
+ started := e.started
+ e.mu.RUnlock()
+
+ if !started {
+ return nil
+ }
+
+ var err error
+
+ e.stopOnce.Do(func() {
+ err = e.client.Stop(ctx)
+ e.mu.Lock()
+ e.started = false
+ e.mu.Unlock()
+ })
+
+ return err
+}
+
+var _ tracesdk.SpanExporter = (*Exporter)(nil)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, client Client) (*Exporter, error) {
+ exp := NewUnstarted(client)
+ if err := exp.Start(ctx); err != nil {
+ return nil, err
+ }
+ return exp, nil
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(client Client) *Exporter {
+ return &Exporter{
+ client: client,
+ }
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Client Client
+ }{
+ Type: "otlptrace",
+ Client: e.client,
+ }
+}
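For context, a hedged usage sketch wiring this exporter into a batching `TracerProvider` via the sibling `otlptracegrpc` package; it assumes a collector reachable at the default endpoint:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// New starts the client and wraps it in an Exporter.
	exp, err := otlptrace.New(ctx, otlptracegrpc.NewClient())
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}

	// WithBatcher registers the exporter behind a batch span processor.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() {
		if err := tp.Shutdown(ctx); err != nil {
			log.Printf("shutting down tracer provider: %v", err)
		}
	}()

	// Emit one span so there is something to export.
	_, span := tp.Tracer("example").Start(ctx, "hello")
	span.End()
}
```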
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
new file mode 100644
index 000000000..4571a5ca3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -0,0 +1,147 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+ return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+ return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+ av := new(commonpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &commonpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &commonpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
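Because `tracetransform` is an internal package, the transform above is easiest to exercise from an in-package test; a minimal hypothetical sketch:

```go
package tracetransform

// A hypothetical in-package test; tracetransform is internal, so the
// check has to live alongside the package rather than import it.

import (
	"testing"

	"go.opentelemetry.io/otel/attribute"
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)

func TestValueStringSlice(t *testing.T) {
	av := Value(attribute.StringSliceValue([]string{"a", "b"}))

	arr, ok := av.Value.(*commonpb.AnyValue_ArrayValue)
	if !ok {
		t.Fatalf("expected array value, got %T", av.Value)
	}
	if n := len(arr.ArrayValue.Values); n != 2 {
		t.Fatalf("expected 2 elements, got %d", n)
	}
}
```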
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
new file mode 100644
index 000000000..2e7690e43
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
+ if il == (instrumentation.Scope{}) {
+ return nil
+ }
+ return &commonpb.InstrumentationScope{
+ Name: il.Name,
+ Version: il.Version,
+ Attributes: Iterator(il.Attributes.Iter()),
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
new file mode 100644
index 000000000..db7b698a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/resource"
+ resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// Resource transforms a Resource into an OTLP Resource.
+func Resource(r *resource.Resource) *resourcepb.Resource {
+ if r == nil {
+ return nil
+ }
+ return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
new file mode 100644
index 000000000..bf27ef022
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -0,0 +1,219 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "math"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+ if len(sdl) == 0 {
+ return nil
+ }
+
+ rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+ type key struct {
+ r attribute.Distinct
+ is instrumentation.Scope
+ }
+ ssm := make(map[key]*tracepb.ScopeSpans)
+
+ var resources int
+ for _, sd := range sdl {
+ if sd == nil {
+ continue
+ }
+
+ rKey := sd.Resource().Equivalent()
+ k := key{
+ r: rKey,
+ is: sd.InstrumentationScope(),
+ }
+ scopeSpan, iOk := ssm[k]
+ if !iOk {
+ // Either the resource or the instrumentation scope was unknown.
+ scopeSpan = &tracepb.ScopeSpans{
+ Scope: InstrumentationScope(sd.InstrumentationScope()),
+ Spans: []*tracepb.Span{},
+ SchemaUrl: sd.InstrumentationScope().SchemaURL,
+ }
+ }
+ scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+ ssm[k] = scopeSpan
+
+ rs, rOk := rsm[rKey]
+ if !rOk {
+ resources++
+ // The resource was unknown.
+ rs = &tracepb.ResourceSpans{
+ Resource: Resource(sd.Resource()),
+ ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+ SchemaUrl: sd.Resource().SchemaURL(),
+ }
+ rsm[rKey] = rs
+ continue
+ }
+
+ // The resource has been seen before. If the instrumentation scope
+ // was unknown, add the new ScopeSpans to the ResourceSpans.
+ // Otherwise, the scope has already been seen and the append above
+ // is already reflected in the existing ScopeSpans reference.
+ if !iOk {
+ rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+ }
+ }
+
+ // Transform the categorized map into a slice
+ rss := make([]*tracepb.ResourceSpans, 0, resources)
+ for _, rs := range rsm {
+ rss = append(rss, rs)
+ }
+ return rss
+}
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+ if sd == nil {
+ return nil
+ }
+
+ tid := sd.SpanContext().TraceID()
+ sid := sd.SpanContext().SpanID()
+
+ s := &tracepb.Span{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ TraceState: sd.SpanContext().TraceState().String(),
+ Status: status(sd.Status().Code, sd.Status().Description),
+ StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
+ EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
+ Links: links(sd.Links()),
+ Kind: spanKind(sd.SpanKind()),
+ Name: sd.Name(),
+ Attributes: KeyValues(sd.Attributes()),
+ Events: spanEvents(sd.Events()),
+ DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
+ DroppedEventsCount: clampUint32(sd.DroppedEvents()),
+ DroppedLinksCount: clampUint32(sd.DroppedLinks()),
+ }
+
+ if psid := sd.Parent().SpanID(); psid.IsValid() {
+ s.ParentSpanId = psid[:]
+ }
+ s.Flags = buildSpanFlags(sd.Parent())
+
+ return s
+}
+
+func clampUint32(v int) uint32 {
+ if v < 0 {
+ return 0
+ }
+ if int64(v) > math.MaxUint32 {
+ return math.MaxUint32
+ }
+ return uint32(v) // nolint: gosec // Overflow/Underflow checked.
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+ var c tracepb.Status_StatusCode
+ switch status {
+ case codes.Ok:
+ c = tracepb.Status_STATUS_CODE_OK
+ case codes.Error:
+ c = tracepb.Status_STATUS_CODE_ERROR
+ default:
+ c = tracepb.Status_STATUS_CODE_UNSET
+ }
+ return &tracepb.Status{
+ Code: c,
+ Message: message,
+ }
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+ if len(links) == 0 {
+ return nil
+ }
+
+ sl := make([]*tracepb.Span_Link, 0, len(links))
+ for _, otLink := range links {
+ // This redefinition is necessary to prevent otLink.*ID[:] copies
+ // being reused -- in short we need a new otLink per iteration.
+ otLink := otLink
+
+ tid := otLink.SpanContext.TraceID()
+ sid := otLink.SpanContext.SpanID()
+
+ flags := buildSpanFlags(otLink.SpanContext)
+
+ sl = append(sl, &tracepb.Span_Link{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ Attributes: KeyValues(otLink.Attributes),
+ DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
+ Flags: flags,
+ })
+ }
+ return sl
+}
+
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+ flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+ if sc.IsRemote() {
+ flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+ }
+
+ return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
+}
+
+// spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+ if len(es) == 0 {
+ return nil
+ }
+
+ events := make([]*tracepb.Span_Event, len(es))
+ // Transform message events
+ for i := 0; i < len(es); i++ {
+ events[i] = &tracepb.Span_Event{
+ Name: es[i].Name,
+ TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
+ Attributes: KeyValues(es[i].Attributes),
+ DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
+ }
+ }
+ return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return tracepb.Span_SPAN_KIND_INTERNAL
+ case trace.SpanKindClient:
+ return tracepb.Span_SPAN_KIND_CLIENT
+ case trace.SpanKindServer:
+ return tracepb.Span_SPAN_KIND_SERVER
+ case trace.SpanKindProducer:
+ return tracepb.Span_SPAN_KIND_PRODUCER
+ case trace.SpanKindConsumer:
+ return tracepb.Span_SPAN_KIND_CONSUMER
+ default:
+ return tracepb.Span_SPAN_KIND_UNSPECIFIED
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
new file mode 100644
index 000000000..5309bb7cb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
new file mode 100644
index 000000000..8409b5f8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+type client struct {
+ endpoint string
+ dialOpts []grpc.DialOption
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // stopCtx is used as a parent context for all exports. Therefore, when it
+ // is canceled with the stopFunc all exports are canceled.
+ stopCtx context.Context
+ // stopFunc cancels stopCtx, stopping any active exports.
+ stopFunc context.CancelFunc
+
+ // ourConn keeps track of where conn was created: true if created here on
+ // Start, or false if passed with an option. This is important on Shutdown
+ // as the conn should only be closed if created here on start. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ tscMu sync.RWMutex
+ tsc coltracepb.TraceServiceClient
+}
+
+// Compile-time check that *client implements otlptrace.Client.
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new gRPC trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+ return newClient(opts...)
+}
+
+func newClient(opts ...Option) *client {
+ cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ c := &client{
+ endpoint: cfg.Traces.Endpoint,
+ exportTimeout: cfg.Traces.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ dialOpts: cfg.DialOptions,
+ stopCtx: ctx,
+ stopFunc: cancel,
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Traces.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Traces.Headers)
+ }
+
+ return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *client) Start(context.Context) error {
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
+ if err != nil {
+ return err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ // The otlptrace.Client interface states this method is called just once,
+ // so no need to check if already started.
+ c.tscMu.Lock()
+ c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+ c.tscMu.Unlock()
+
+ return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client-held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+ // Make sure to return context error if the context is done when calling this method.
+ err := ctx.Err()
+
+ // Acquire the c.tscMu lock within the ctx lifetime.
+ acquired := make(chan struct{})
+ go func() {
+ c.tscMu.Lock()
+ close(acquired)
+ }()
+
+ select {
+ case <-ctx.Done():
+ // The Stop timeout is reached. Kill any remaining exports to force
+ // the lock to clear, and save the timeout error to return, signaling
+ // that the shutdown timed out before cleanly stopping.
+ c.stopFunc()
+ err = ctx.Err()
+
+ // To ensure the client is not left in a dirty state c.tsc needs to be
+ // set to nil. To avoid the race condition when doing this, ensure
+ // that all the exports are killed (initiated by c.stopFunc).
+ <-acquired
+ case <-acquired:
+ }
+ // Hold the tscMu lock for the rest of the function to ensure no new
+ // exports are started.
+ defer c.tscMu.Unlock()
+
+ // The otlptrace.Client interface states this method is called only
+ // once, but there is no guarantee it is called after Start. Ensure the
+ // client is started before doing anything and let the caller know if
+ // they made a mistake.
+ if c.tsc == nil {
+ return errAlreadyStopped
+ }
+
+ // Clear c.tsc to signal the client is stopped.
+ c.tsc = nil
+
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+ // Hold a read lock to ensure a shutdown initiated after this starts does
+ // not abandon the export. This read lock acquire has lower priority than a
+ // write lock acquire (i.e. Stop), meaning if the client is shutting down
+ // this will come after the shutdown.
+ c.tscMu.RLock()
+ defer c.tscMu.RUnlock()
+
+ if c.tsc == nil {
+ return errShutdown
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+ ResourceSpans: protoSpans,
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedSpans()
+ if n != 0 || msg != "" {
+ err := internal.TracePartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ md := c.metadata
+ if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
+ md = metadata.Join(md, outMD)
+ }
+
+ ctx = metadata.NewOutgoingContext(ctx, md)
+ }
+
+ // Unify the client stopCtx with the parent.
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-c.stopCtx.Done():
+ // Cancel the export as the shutdown has timed out.
+ cancel()
+ }
+ }()
+
+ return ctx, cancel
+}
+
+// retryable reports whether err identifies a request that can be retried and
+// the duration to wait before retrying if err includes an explicit throttle time.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay reports whether the status includes a RetryInfo detail and,
+// if so, the explicit throttle duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ }{
+ Type: "otlptracegrpc",
+ Endpoint: c.endpoint,
+ }
+}
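To make the retry classification above concrete, here is a small standalone sketch that mirrors `retryableGRPCStatus`: the listed gRPC codes always retry, while `ResourceExhausted` retries only when the server attaches a `RetryInfo` detail. The error message is a made-up example.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRetryable mirrors retryableGRPCStatus above: a fixed set of codes is
// always retryable, and ResourceExhausted only when RetryInfo is attached.
func isRetryable(err error) (bool, time.Duration) {
	s := status.Convert(err)
	switch s.Code() {
	case codes.Canceled, codes.DeadlineExceeded, codes.Aborted,
		codes.OutOfRange, codes.Unavailable, codes.DataLoss:
		return true, 0
	case codes.ResourceExhausted:
		for _, d := range s.Details() {
			if ri, ok := d.(*errdetails.RetryInfo); ok {
				return true, ri.RetryDelay.AsDuration()
			}
		}
	}
	return false, 0
}

func main() {
	ok, d := isRetryable(status.Error(codes.Unavailable, "collector down"))
	fmt.Println(ok, d) // true 0s
}
```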
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 000000000..b7bd429ff
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port, and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+The scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT or
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
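As a usage sketch of the environment-variable configuration described above — the collector host is hypothetical, and in practice these variables are set in the deployment environment rather than in code:

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Set here only to show the knobs; the per-signal TRACES_TIMEOUT takes
	// precedence over the general TIMEOUT, per the package documentation.
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example.com:4317") // hypothetical host
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_TIMEOUT", "5000")                         // milliseconds

	exp, err := otlptracegrpc.New(context.Background())
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}
```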
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 000000000..b826b8424
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+ return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+ return otlptrace.NewUnstarted(NewClient(opts...))
+}
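A typical wiring of this exporter into the OpenTelemetry SDK might look like the following sketch; the tracer-provider setup comes from go.opentelemetry.io/otel/sdk/trace, not from this package:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()
	// New starts the exporter; WithInsecure is only sensible for local
	// collectors such as the default localhost:4317.
	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure())
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)
}
```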
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..4abf48d1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value for the specified key
+// using the GetEnv function.
+// This function prepends the OTLP-specific namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '="), "parse headers", "input", header)
+ continue
+ }
+
+ trimmedName := strings.TrimSpace(n)
+
+ // Validate the key.
+ if !isValidHeaderKey(trimmedName) {
+ global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
+ continue
+ }
+
+ // Only decode the value.
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "escape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
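The header format accepted by `stringToHeader` above is comma-separated `key=value` pairs with percent-encoded values. A self-contained sketch of the same parsing rules (`parseHeaders` is an illustrative stand-in, not part of the package):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseHeaders mirrors stringToHeader above: comma-separated key=value
// pairs, with only the value percent-decoded.
func parseHeaders(s string) map[string]string {
	out := make(map[string]string)
	for _, pair := range strings.Split(s, ",") {
		k, v, ok := strings.Cut(pair, "=")
		if !ok {
			continue // a pair without '=' is skipped (the real code logs it)
		}
		if dv, err := url.PathUnescape(v); err == nil {
			out[strings.TrimSpace(k)] = strings.TrimSpace(dv)
		}
	}
	return out
}

func main() {
	fmt.Println(parseHeaders("api-key=secret,tenant=acme%20corp"))
	// map[api-key:secret tenant:acme corp]
}
```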
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
new file mode 100644
index 000000000..97cd6c54f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
new file mode 100644
index 000000000..7bb189a94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -0,0 +1,142 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if a URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Traces.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ )
+
+ return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
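One subtlety above: for gRPC, `withEndpointForGRPC` keeps the URL path joined onto the host, whereas for HTTP the path becomes `URLPath`. A tiny illustration with a hypothetical URL:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// For gRPC, the endpoint env value keeps host and path joined together,
	// using the same path.Join call as withEndpointForGRPC.
	u, err := url.Parse("https://collector.example.com:4317/otlp")
	if err != nil {
		panic(err)
	}
	fmt.Println(path.Join(u.Host, u.Path)) // collector.example.com:4317/otlp
}
```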
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
new file mode 100644
index 000000000..0a317d926
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -0,0 +1,351 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+ // DefaultTracesPath is the default URL path for the endpoint that
+ // receives spans.
+ DefaultTracesPath string = "/v1/traces"
+ // DefaultTimeout is the default maximum time to wait for the backend to
+ // process each span batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function for the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Traces SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = "/" + tmp
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Traces.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+ } else if cfg.Traces.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Traces.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Traces.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = endpoint
+ return cfg
+ })
+}
+
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ cfg.Traces.Insecure = u.Scheme != "https"
+
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Traces.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Timeout = duration
+ return cfg
+ })
+}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
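The option machinery above is the familiar Go functional-options pattern, sealed with a private method so only this package can implement the interfaces. A pared-down sketch of the same idea (all names here are illustrative):

```go
package main

import "fmt"

type config struct{ endpoint string }

// option is sealed: the unexported private method keeps outside packages
// from implementing it, so the interface can grow without breaking anyone.
type option interface {
	apply(config) config
	private()
}

type funcOption struct{ fn func(config) config }

func (f funcOption) apply(c config) config { return f.fn(c) }
func (funcOption) private()                {}

func withEndpoint(e string) option {
	return funcOption{fn: func(c config) config { c.endpoint = e; return c }}
}

func main() {
	c := config{endpoint: "localhost:4317"}
	for _, o := range []option{withEndpoint("collector:4317")} {
		c = o.apply(c)
	}
	fmt.Println(c.endpoint) // collector:4317
}
```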
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
new file mode 100644
index 000000000..3d4f699d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,40 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // to connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+ // MarshalProto tells the driver to send using the protobuf binary format.
+ MarshalProto Marshaler = iota
+ // MarshalJSON tells the driver to send using JSON format.
+ MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
new file mode 100644
index 000000000..38b97a013
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -0,0 +1,26 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
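`CreateTLSConfig` expects raw PEM bytes; reading them from disk is left to the caller. A minimal sketch that repeats the same logic for a CA bundle at a placeholder path (`ca.pem`), since the package itself is internal and cannot be imported directly:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"os"
)

// newTLSConfig repeats the logic of CreateTLSConfig above for a CA bundle
// read from disk ("ca.pem" is a placeholder path).
func newTLSConfig(pemPath string) (*tls.Config, error) {
	b, err := os.ReadFile(pemPath)
	if err != nil {
		return nil, err
	}
	cp := x509.NewCertPool()
	if !cp.AppendCertsFromPEM(b) {
		return nil, errors.New("failed to append certificate to the cert pool")
	}
	return &tls.Config{RootCAs: cp}, nil
}

func main() {
	cfg, err := newTLSConfig("ca.pem")
	if err != nil {
		fmt.Println("tls config:", err)
		return
	}
	fmt.Println("root CAs configured:", cfg.RootCAs != nil)
}
```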
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
new file mode 100644
index 000000000..a12ea4c48
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
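Because `PartialSuccess` implements `Is`, a partial-success error can be detected with `errors.Is` regardless of its field values. A self-contained sketch using a stand-in type (the real type lives in this internal package):

```go
package main

import (
	"errors"
	"fmt"
)

// partialSuccess is a stand-in for the internal PartialSuccess type above.
type partialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  string
}

func (ps partialSuccess) Error() string {
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)",
		ps.ErrorMessage, ps.RejectedItems, ps.RejectedKind)
}

// Is matches any error of this type, so errors.Is works on the zero value.
func (ps partialSuccess) Is(err error) bool {
	_, ok := err.(partialSuccess)
	return ok
}

func main() {
	err := partialSuccess{ErrorMessage: "quota", RejectedItems: 3, RejectedKind: "spans"}
	fmt.Println(errors.Is(err, partialSuccess{})) // true
}
```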
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
new file mode 100644
index 000000000..1c5450ab6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig holds the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether to retry sending batches in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval is the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc reports whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %w", ctxErr, err)
+ }
+ }
+ }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
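A compact restatement of the `RequestFunc` contract above, with a fixed delay standing in for the exponential backoff (the real code also enforces `MaxElapsedTime` and honors `backoff.Stop`):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry keeps calling fn while evaluate reports the error as retryable,
// honoring an explicit throttle delay when one is larger than the backoff.
func retry(ctx context.Context, evaluate func(error) (bool, time.Duration), fn func(context.Context) error) error {
	delay := 100 * time.Millisecond // stand-in for the exponential backoff
	for {
		err := fn(ctx)
		if err == nil {
			return nil
		}
		retryable, throttle := evaluate(err)
		if !retryable {
			return err
		}
		if throttle > delay {
			delay = throttle
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("%w: %w", ctx.Err(), err)
		case <-time.After(delay):
		}
	}
}

func main() {
	attempts := 0
	err := retry(context.Background(),
		func(error) (bool, time.Duration) { return attempts < 3, 0 },
		func(context.Context) error {
			attempts++
			return errors.New("transient")
		})
	fmt.Println(attempts, err) // 3 transient
}
```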
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 000000000..00ab1f20c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,210 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+ applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+ converted := make([]otlpconfig.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying export of span batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4317" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
+// the Exporter will connect to. The provided endpoint URL should resemble
+// "https://example.com:4318/v1/traces".
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+ if compressor == "gzip" {
+ return otlpconfig.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. Supported compressor values: "gzip".
+func WithCompressor(compressor string) Option {
+ return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes grpc.TransportCredentials instead of,
+// say, a certificate file or a tls.Certificate because these credentials
+// can be retrieved in many ways (e.g. from a plain file, an in-code
+// tls.Config, or certificate rotation), so it is up to the caller to decide
+// what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.Traces.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used, so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
+
+// WithTimeout sets the maximum amount of time a client will attempt to
+// export a batch of spans. This takes precedence over any retry settings
+// defined with WithRetry; once this time limit has been reached, the export
+// is abandoned and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error but also
+// an explicit backoff time in the response, that time will take precedence
+// over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error, increasing the interval
+// exponentially after each error, for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
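Taken together, these options compose at exporter construction time. Below is a minimal, hedged sketch of wiring them up; it assumes the `otlptracegrpc.New` constructor from the same package (defined in a file not shown in this hunk), and the collector address is a placeholder:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// The endpoint is a placeholder; WithInsecure is only appropriate
	// for local or test setups.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("collector.example.com:4317"),
		otlptracegrpc.WithInsecure(),
		otlptracegrpc.WithCompressor("gzip"),
		otlptracegrpc.WithTimeout(10*time.Second),
		otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```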
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
new file mode 100644
index 000000000..f156ee667
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
+func Version() string {
+ return "1.34.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
index 0a29a2f13..a6fa353f9 100644
--- a/vendor/go.opentelemetry.io/otel/renovate.json
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -1,7 +1,7 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
- "config:recommended"
+ "config:best-practices"
],
"ignorePaths": [],
"labels": ["Skip Changelog", "dependencies"],
@@ -15,10 +15,8 @@
"enabled": true
},
{
- "matchFileNames": ["internal/tools/**"],
- "matchManagers": ["gomod"],
- "matchDepTypes": ["indirect"],
- "enabled": false
+ "matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
+ "groupName": "build-tools"
},
{
"matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
index ab09daf9d..1bb55fb1c 100644
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -1 +1 @@
-codespell==2.3.0
+codespell==2.4.1
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
new file mode 100644
index 000000000..f81b1576a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/README.md
@@ -0,0 +1,3 @@
+# SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
new file mode 100644
index 000000000..06e6d8685
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
@@ -0,0 +1,3 @@
+# SDK Instrumentation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 000000000..a4faa6a03
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 000000000..f2cdf3c65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+//
+// Deprecated: use [Scope] instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 000000000..34852a47b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+ // Name is the name of the instrumentation scope. This should be the
+ // Go package name of that scope.
+ Name string
+ // Version is the version of the instrumentation scope.
+ Version string
+ // SchemaURL of the telemetry emitted by the scope.
+ SchemaURL string
+ // Attributes of the telemetry emitted by the scope.
+ Attributes attribute.Set
+}
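Scope is a plain value type, so constructing one directly is straightforward. A brief sketch, in which the name, version, schema URL, and attribute are all placeholder values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/instrumentation"
)

func main() {
	scope := instrumentation.Scope{
		Name:      "example.com/internal/orders", // conventionally the Go package name
		Version:   "v0.1.0",
		SchemaURL: "https://opentelemetry.io/schemas/1.26.0",
		Attributes: attribute.NewSet(
			attribute.String("tenant", "demo"),
		),
	}
	fmt.Printf("%+v\n", scope)
}
```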
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
new file mode 100644
index 000000000..07923ed8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -0,0 +1,166 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+
+import (
+ "os"
+ "strconv"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+	// BatchSpanProcessorScheduleDelayKey is the delay interval between two
+	// consecutive exports (e.g. 5000).
+	BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
+	// BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
+	// export data (e.g. 3000).
+	BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
+	// BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (e.g. 2048).
+	BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
+	// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (e.g.
+	// 512). Note: it must be less than or equal to
+	// BatchSpanProcessorMaxQueueSize.
+	BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+
+ // AttributeValueLengthKey is the maximum allowed attribute value size.
+ AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // AttributeCountKey is the maximum allowed span attribute count.
+ AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanAttributeValueLengthKey is the maximum allowed attribute value size
+ // for a span.
+ SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // SpanAttributeCountKey is the maximum allowed span attribute count for a
+ // span.
+ SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanEventCountKey is the maximum allowed span event count.
+ SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+
+ // SpanEventAttributeCountKey is the maximum allowed attribute per span
+ // event count.
+ SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanLinkCountKey is the maximum allowed span link count.
+ SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
+
+ // SpanLinkAttributeCountKey is the maximum allowed attribute per span
+ // link count.
+ SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+ for _, key := range keys {
+ value := os.Getenv(key)
+ if value == "" {
+ continue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+ }
+
+ return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key
+// if it exists, is not empty, and the value is an int. Otherwise,
+// defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+}
+
+// BatchSpanProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorScheduleDelay(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchSpanProcessorExportTimeout returns the environment variable value for
+// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorExportTimeout(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
+}
+
+// SpanAttributeValueLength returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
+// returned or defaultValue if that is not set.
+func SpanAttributeValueLength(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
+}
+
+// SpanAttributeCount returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
+// defaultValue if that is not set.
+func SpanAttributeCount(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
+}
+
+// SpanEventCount returns the environment variable value for the
+// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanEventCount(defaultValue int) int {
+ return IntEnvOr(SpanEventCountKey, defaultValue)
+}
+
+// SpanEventAttributeCount returns the environment variable value for the
+// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
+// is returned.
+func SpanEventAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
+}
+
+// SpanLinkCount returns the environment variable value for the
+// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkCountKey, defaultValue)
+}
+
+// SpanLinkAttributeCount returns the environment variable value for the
+// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
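The fallback behavior of firstInt is the subtle part: the first key that is set wins, and an unparseable value returns the default immediately rather than consulting later keys. Since this package is internal, the sketch below re-declares the helper locally to demonstrate that ordering:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstIntOr mirrors the internal firstInt helper above: the first set key
// wins; an unparseable value falls back to the default immediately.
func firstIntOr(defaultValue int, keys ...string) int {
	for _, key := range keys {
		value := os.Getenv(key)
		if value == "" {
			continue
		}
		n, err := strconv.Atoi(value)
		if err != nil {
			return defaultValue
		}
		return n
	}
	return defaultValue
}

func main() {
	// The span-specific key is unset, so the generic key applies.
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	limit := firstIntOr(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
	fmt.Println(limit) // 64
}
```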
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 000000000..fab61647c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 000000000..68d296cbe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user-configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
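Because this package is internal, Feature[T] cannot be used from outside the SDK; the following sketch mirrors the same generic flag pattern with local stand-in names, with the parse function copied from the Resource flag above:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// feature is a local stand-in for the internal Feature[T] type above.
type feature[T any] struct {
	key   string
	parse func(string) (T, bool)
}

func (f feature[T]) lookup() (v T, ok bool) {
	raw := os.Getenv(f.key)
	if raw == "" {
		return v, ok // empty is treated the same as unset
	}
	return f.parse(raw)
}

func main() {
	res := feature[string]{
		key: "OTEL_GO_X_RESOURCE",
		parse: func(v string) (string, bool) {
			if strings.ToLower(v) == "true" {
				return v, true
			}
			return "", false
		},
	}

	os.Setenv("OTEL_GO_X_RESOURCE", "True")
	if _, ok := res.lookup(); ok {
		fmt.Println("experimental resource conventions enabled")
	}
}
```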
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
new file mode 100644
index 000000000..4ad864d71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
@@ -0,0 +1,3 @@
+# SDK Resource
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 000000000..c02aeefdd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+)
+
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Detect returns an initialized Resource based on gathered information.
+ // If the source information to construct a Resource contains invalid
+ // values, a Resource is returned with the valid parts of the source
+ // information used for initialization along with an appropriately
+ // wrapped ErrPartialResource error.
+ Detect(ctx context.Context) (*Resource, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. The detectors are called sequentially, in the order
+// they are passed, merging each produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ r := new(Resource)
+ return r, detect(ctx, r, detectors)
+}
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil; it will panic otherwise.
+//
+// If the detectors or merging resources produce any errors (i.e.
+// [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all
+// of these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+ var (
+ r *Resource
+ err error
+ e error
+ )
+
+ for _, detector := range detectors {
+ if detector == nil {
+ continue
+ }
+ r, e = detector.Detect(ctx)
+ if e != nil {
+ err = errors.Join(err, e)
+ if !errors.Is(e, ErrPartialResource) {
+ continue
+ }
+ }
+ r, e = Merge(res, r)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ *res = *r
+ }
+
+ if err != nil {
+ if errors.Is(err, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
+
+ err = fmt.Errorf("error detecting resource: %w", err)
+ }
+ return err
+}
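Detect accepts any Detector implementation, so a one-off detector can be used to inject attributes. A hedged sketch, in which envDetector and its attribute value are hypothetical, and NewSchemaless is the constructor referenced by config.go later in this diff:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// envDetector is a hypothetical one-off Detector for this sketch.
type envDetector struct{}

func (envDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(
		attribute.String("deployment.environment", "staging"),
	), nil
}

func main() {
	res, err := resource.Detect(context.Background(), envDetector{})
	if err != nil && !errors.Is(err, resource.ErrPartialResource) {
		log.Fatal(err) // non-partial errors mean a detector's result was skipped
	}
	fmt.Println(res)
}
```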
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 000000000..cf3c88e15
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/google/uuid"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type (
+ // telemetrySDK is a Detector that provides information about
+ // the OpenTelemetry SDK used. This Detector is included as a
+ // builtin. If these resource attributes are not wanted, use
+ // resource.New() to explicitly disable them.
+ telemetrySDK struct{}
+
+ // host is a Detector that provides information about the host
+ // being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, use resource.New()
+	// to explicitly disable them.
+ host struct{}
+
+ stringDetector struct {
+ schemaURL string
+ K attribute.Key
+ F func() (string, error)
+ }
+
+ defaultServiceNameDetector struct{}
+
+ defaultServiceInstanceIDDetector struct{}
+)
+
+var (
+ _ Detector = telemetrySDK{}
+ _ Detector = host{}
+ _ Detector = stringDetector{}
+ _ Detector = defaultServiceNameDetector{}
+ _ Detector = defaultServiceInstanceIDDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.TelemetrySDKName("opentelemetry"),
+ semconv.TelemetrySDKLanguageGo,
+ semconv.TelemetrySDKVersion(sdk.Version()),
+ ), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+ return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource that describes the string as a value
+// corresponding to attribute.Key as well as the specific schemaURL.
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+ value, err := sd.F()
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+ }
+ a := sd.K.String(value)
+ if !a.Valid() {
+ return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+ }
+ return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceNameKey,
+ func() (string, error) {
+ executable, err := os.Executable()
+ if err != nil {
+ return "unknown_service:go", nil
+ }
+ return "unknown_service:" + filepath.Base(executable), nil
+ },
+ ).Detect(ctx)
+}
+
+// Detect implements Detector.
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceInstanceIDKey,
+ func() (string, error) {
+ version4Uuid, err := uuid.NewRandom()
+ if err != nil {
+ return "", err
+ }
+
+ return version4Uuid.String(), nil
+ },
+ ).Detect(ctx)
+}
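StringDetector is the one exported building block in this file. A small usage sketch follows; it is shaped like the built-in host detector above, but the attribute key is hypothetical and the schema URL is left empty for simplicity:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Records the hostname under a hypothetical custom key.
	det := resource.StringDetector(
		"", // schemaless for this sketch
		attribute.Key("example.node.name"),
		os.Hostname,
	)

	res, err := det.Detect(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res)
}
```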
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 000000000..0d6e213d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+ // detectors that will be evaluated.
+ detectors []Detector
+ // SchemaURL to associate with the Resource.
+ schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+ // apply sets the Option value of a config.
+ apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+ return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+ attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+ return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+ return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+ detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+ cfg.detectors = append(cfg.detectors, o.detectors...)
+ return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+ return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+ return WithDetectors(host{})
+}
+
+// WithHostID adds host ID information to the configured resource.
+func WithHostID() Option {
+ return WithDetectors(hostIDDetector{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+ return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+ return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+ cfg.schemaURL = string(o)
+ return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+ return WithDetectors(
+ osTypeDetector{},
+ osDescriptionDetector{},
+ )
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+ return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+ return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information, it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+ return WithDetectors(
+ processPIDDetector{},
+ processExecutableNameDetector{},
+ processExecutablePathDetector{},
+ processCommandArgsDetector{},
+ processOwnerDetector{},
+ processRuntimeNameDetector{},
+ processRuntimeVersionDetector{},
+ processRuntimeDescriptionDetector{},
+ )
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+ return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+ return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+ return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information, it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+ return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+ return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+ return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+ return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+ return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+ return WithDetectors(
+ cgroupContainerIDDetector{},
+ )
+}
+
+// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+ return WithDetectors(cgroupContainerIDDetector{})
+}
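These options are consumed by resource.New, which is defined elsewhere in this package. A sketch of a typical combination, assuming that constructor; WithProcess is deliberately left out because, as warned above, it captures command-line arguments:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES and OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // SDK name, language, and version
		resource.WithHost(),         // host.name
		resource.WithOS(),           // os.type and os.description
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res)
}
```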
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 000000000..5ecd859a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "regexp"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+ containerID containerIDProvider = getContainerIDFromCGroup
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the id of the container.
+// If no container id is found, an empty resource will be returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ containerID, err := containerID()
+ if err != nil {
+ return nil, err
+ }
+
+ if containerID == "" {
+ return Empty(), nil
+ }
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+ defaultOSStat = os.Stat
+ osStat = defaultOSStat
+
+ defaultOSOpen = func(name string) (io.ReadCloser, error) {
+ return os.Open(name)
+ }
+ osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id is found, an empty string will be returned.
+func getContainerIDFromCGroup() (string, error) {
+ if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+ // File does not exist, skip
+ return "", nil
+ }
+
+ file, err := osOpen(cgroupPath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if id := getContainerIDFromLine(line); id != "" {
+ return id
+ }
+ }
+ return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+ matches := cgroupContainerIDRe.FindStringSubmatch(line)
+ if len(matches) <= 1 {
+ return ""
+ }
+ return matches[1]
+}
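The regular expression is the core of the detection. A self-contained sketch running the same pattern against a made-up cgroup line:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same pattern used by the detector above; the sample line is
	// made up for this sketch.
	re := regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)

	line := "13:name=systemd:/pod/docker/ac679f8a8319c8cf7d38e1adf263bc08d23"
	if m := re.FindStringSubmatch(line); len(m) > 1 {
		fmt.Println(m[1]) // ac679f8a8319c8cf7d38e1adf263bc08d23
	}
}
```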
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 000000000..64939a271
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 000000000..813f05624
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name that OpenTelemetry Resource information will be read from.
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+ // svcNameKey is the environment variable name that Service Name information will be read from.
+ svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that collects resources from the environment. This
+// Detector is included as a builtin.
+type fromEnv struct{}
+
+// compile-time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+ attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+ svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+ if attrs == "" && svcName == "" {
+ return Empty(), nil
+ }
+
+ var res *Resource
+
+ if svcName != "" {
+ res = NewSchemaless(semconv.ServiceName(svcName))
+ }
+
+ r2, err := constructOTResources(attrs)
+
+ // Ensure that the resource with the service name from OTEL_SERVICE_NAME
+ // takes precedence, if it was defined.
+ res, err2 := Merge(r2, res)
+
+ if err == nil {
+ err = err2
+ } else if err2 != nil {
+ err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+ }
+
+ return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+ if s == "" {
+ return Empty(), nil
+ }
+ pairs := strings.Split(s, ",")
+ var attrs []attribute.KeyValue
+ var invalid []string
+ for _, p := range pairs {
+ k, v, found := strings.Cut(p, "=")
+ if !found {
+ invalid = append(invalid, p)
+ continue
+ }
+ key := strings.TrimSpace(k)
+ val, err := url.PathUnescape(strings.TrimSpace(v))
+ if err != nil {
+ // Retain original value if decoding fails, otherwise it will be
+ // an empty string.
+ val = v
+ otel.Handle(err)
+ }
+ attrs = append(attrs, attribute.String(key, val))
+ }
+ var err error
+ if len(invalid) > 0 {
+ err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+ }
+ return NewSchemaless(attrs...), err
+}
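A sketch of how the two environment variables interact, assuming resource.New and WithFromEnv from this package; the attribute names and values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Values are URL-path-unescaped, so %20 decodes to a space.
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.namespace=shop,host.rack=rack%2042")
	// OTEL_SERVICE_NAME takes precedence over any service.name in the list.
	os.Setenv("OTEL_SERVICE_NAME", "checkout")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res)
}
```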
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
new file mode 100644
index 000000000..2d0f65498
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type hostIDProvider func() (string, error)
+
+var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
+
+var hostID = defaultHostIDProvider
+
+type hostIDReader interface {
+ read() (string, error)
+}
+
+type fileReader func(string) (string, error)
+
+type commandExecutor func(string, ...string) (string, error)
+
+// hostIDReaderBSD implements hostIDReader.
+type hostIDReaderBSD struct {
+ execCommand commandExecutor
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/hostid. If not found, it
+// will execute `kenv -q smbios.system.uuid`. If neither location yields an
+// id, an error will be returned.
+func (r *hostIDReaderBSD) read() (string, error) {
+ if result, err := r.readFile("/etc/hostid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/hostid or kenv")
+}
+
+// hostIDReaderDarwin implements hostIDReader.
+type hostIDReaderDarwin struct {
+ execCommand commandExecutor
+}
+
+// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses the
+// host id from the IOPlatformUUID line. If the command fails or the uuid
+// cannot be parsed, an error will be returned.
+func (r *hostIDReaderDarwin) read() (string, error) {
+ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ if err != nil {
+ return "", err
+ }
+
+ lines := strings.Split(result, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.Split(line, " = ")
+ if len(parts) == 2 {
+ return strings.Trim(parts[1], "\""), nil
+ }
+ break
+ }
+ }
+
+ return "", errors.New("could not parse IOPlatformUUID")
+}
+
+type hostIDReaderLinux struct {
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/machine-id followed by
+// /var/lib/dbus/machine-id. If neither location yields an ID, an error will
+// be returned.
+func (r *hostIDReaderLinux) read() (string, error) {
+ if result, err := r.readFile("/etc/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
+}
+
+type hostIDDetector struct{}
+
+// Detect returns a *Resource containing the platform specific host id.
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ hostID, err := hostID()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.HostID(hostID),
+ ), nil
+}
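A brief usage sketch, assuming resource.New from this package; on each platform the detector reads the identifier described by the platform-specific readers in the files that follow:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Reads /etc/machine-id on Linux, /etc/hostid or kenv output on the
	// BSDs, or ioreg output on Darwin, as described above.
	res, err := resource.New(context.Background(), resource.WithHostID())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res)
}
```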
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
new file mode 100644
index 000000000..cc8b8938e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build dragonfly || freebsd || netbsd || openbsd || solaris
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderBSD{
+ execCommand: execCommand,
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
new file mode 100644
index 000000000..b09fde3b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
+ execCommand: execCommand,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
new file mode 100644
index 000000000..d9e5d1a8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os/exec"
+
+func execCommand(name string, arg ...string) (string, error) {
+ cmd := exec.Command(name, arg...)
+ b, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 000000000..f84f17324
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 000000000..6354b3560
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 000000000..df12c44c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating systems
+// for which this project currently doesn't support host.id
+// attribute detection. See the build tags declared at the top of this file
+// for the list of unsupported OSes.
+type hostIDReaderUnsupported struct{}
+
+func (*hostIDReaderUnsupported) read() (string, error) {
+ return "", nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
new file mode 100644
index 000000000..3677c83d7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build windows
+// +build windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "golang.org/x/sys/windows/registry"
+)
+
+// hostIDReaderWindows implements hostIDReader.
+type hostIDReaderWindows struct{}
+
+// read reads MachineGuid from the Windows registry key:
+// SOFTWARE\Microsoft\Cryptography.
+func (*hostIDReaderWindows) read() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
+ registry.QUERY_VALUE|registry.WOW64_64KEY,
+ )
+ if err != nil {
+ return "", err
+ }
+ defer k.Close()
+
+ guid, _, err := k.GetStringValue("MachineGuid")
+ if err != nil {
+ return "", err
+ }
+
+ return guid, nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 000000000..8a48ab4fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+ setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+ osDescription = osDescriptionProvider
+}
+
+type (
+ osTypeDetector struct{}
+ osDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+ osType := runtimeOS()
+
+ osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ osTypeAttribute,
+ ), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ description, err := osDescription()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.OSDescription(description),
+ ), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
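
As a usage sketch for the two detectors above, assuming the package's `resource.WithOS` option (not shown in this diff), which enables both of them:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithOS combines osTypeDetector and osDescriptionDetector, yielding the
	// os.type (e.g. "linux") and os.description attributes on the Resource.
	res, err := resource.New(context.Background(), resource.WithOS())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.String())
}
```
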
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 000000000..ce455dc54
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` command-line program, but as a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-values for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+ productName := properties["ProductName"]
+ productVersion := properties["ProductVersion"]
+ productBuildVersion := properties["ProductBuildVersion"]
+
+ if productName == "" || productVersion == "" || productBuildVersion == "" {
+ return ""
+ }
+
+ return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
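
The correlated `<key>`/`<string>` pairing can be illustrated with a self-contained sketch; the sample plist content and the `plistDoc` type below are hypothetical stand-ins for the package's own `plist`/`dict` types:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type plistDoc struct {
	Dict struct {
		Key    []string `xml:"key"`
		String []string `xml:"string"`
	} `xml:"dict"`
}

func main() {
	const doc = `<plist><dict>
<key>ProductName</key><string>macOS</string>
<key>ProductVersion</key><string>14.4</string>
<key>ProductBuildVersion</key><string>23E214</string>
</dict></plist>`

	var v plistDoc
	if err := xml.NewDecoder(strings.NewReader(doc)).Decode(&v); err != nil {
		panic(err)
	}
	props := make(map[string]string, len(v.Dict.Key))
	for i, k := range v.Dict.Key {
		props[k] = v.Dict.String[i] // pair the i-th <key> with the i-th <string>
	}
	// Mirrors buildOSRelease: prints "macOS 14.4 (23E214)".
	fmt.Printf("%s %s (%s)\n", props["ProductName"], props["ProductVersion"], props["ProductBuildVersion"])
}
```
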
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 000000000..f537e5ca5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+ file, err := getOSReleaseFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values := parseOSReleaseFile(file)
+
+ return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release
+// file and returns a map with the key-values contained in it. Empty lines and
+// lines starting with a '#' character are ignored, as are lines missing the
+// key=value separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+ values := make(map[string]string)
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if skip(line) {
+ continue
+ }
+
+ key, value, ok := parse(line)
+ if ok {
+ values[key] = value
+ }
+ }
+
+ return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+ line = strings.TrimSpace(line)
+
+ return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+ k, v, found := strings.Cut(line, "=")
+
+ if !found || len(k) == 0 {
+ return "", "", false
+ }
+
+ key := strings.TrimSpace(k)
+ value := unescape(unquote(strings.TrimSpace(v)))
+
+ return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+ if len(s) < 2 {
+ return s
+ }
+
+ if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+ return s[1 : len(s)-1]
+ }
+
+ return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+ return strings.NewReplacer(
+ `\$`, `$`,
+ `\"`, `"`,
+ `\'`, `'`,
+ `\\`, `\`,
+ "\\`", "`",
+ ).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+ var osRelease string
+
+ name := values["NAME"]
+ version := values["VERSION"]
+
+ if version == "" {
+ version = values["VERSION_ID"]
+ }
+
+ if name != "" && version != "" {
+ osRelease = fmt.Sprintf("%s %s", name, version)
+ } else {
+ osRelease = values["PRETTY_NAME"]
+ }
+
+ return osRelease
+}
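
A self-contained sketch of the same parsing rules applied to a hypothetical os-release file; the quote handling here is deliberately simplified relative to the `unquote`/`unescape` helpers above:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	const sample = `
# /etc/os-release (sample)
NAME="Ubuntu"
VERSION="22.04.4 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.4 LTS"
`
	values := map[string]string{}
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // what skip() does above
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok || k == "" {
			continue
		}
		values[k] = strings.Trim(v, `"'`) // simplified unquote; no unescape
	}
	// NAME+VERSION is preferred over PRETTY_NAME, as in buildOSRelease:
	// prints "Ubuntu 22.04.4 LTS (Jammy Jellyfish)".
	fmt.Printf("%s %s\n", values["NAME"], values["VERSION"])
}
```
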
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 000000000..a6ff26a4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+ setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+ currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+ uname, err := uname()
+ if err != nil {
+ return "", err
+ }
+
+ osRelease := osRelease()
+ if osRelease != "" {
+ return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+ }
+
+ return uname, nil
+}
+
+// uname issues a uname(2) system call (or the equivalent on systems that don't
+// have one) and formats the output as a single string, similar to the output
+// of the `uname` command-line program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+ var utsName unix.Utsname
+
+ err := currentUnameProvider(&utsName)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%s %s %s %s %s",
+ unix.ByteSliceToString(utsName.Sysname[:]),
+ unix.ByteSliceToString(utsName.Nodename[:]),
+ unix.ByteSliceToString(utsName.Release[:]),
+ unix.ByteSliceToString(utsName.Version[:]),
+ unix.ByteSliceToString(utsName.Machine[:]),
+ ), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+ for _, c := range candidates {
+ file, err := os.Open(c)
+ if err == nil {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 000000000..a77742b07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declared at the top of this file
+// for the list of unsupported OSes.
+func platformOSDescription() (string, error) {
+ return "", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 000000000..a6a5a53c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "strconv"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+ if err != nil {
+ return "", err
+ }
+
+ defer k.Close()
+
+ var (
+ productName = readProductName(k)
+ displayVersion = readDisplayVersion(k)
+ releaseID = readReleaseID(k)
+ currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+ currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+ currentBuildNumber = readCurrentBuildNumber(k)
+ ubr = readUBR(k)
+ )
+
+ if displayVersion != "" {
+ displayVersion += " "
+ }
+
+ return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+ productName,
+ displayVersion,
+ releaseID,
+ currentMajorVersionNumber,
+ currentMinorVersionNumber,
+ currentBuildNumber,
+ ubr,
+ ), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+ value, _, _ := k.GetStringValue(name)
+
+ return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+ value, _, _ := k.GetIntegerValue(name)
+
+ return value
+}
+
+func readProductName(k registry.Key) string {
+ return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+ return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+ return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+ return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
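
For illustration, the format string in `platformOSDescription` produces output like the following; the registry values plugged in here are hypothetical samples for a Windows 10 host:

```go
package main

import "fmt"

func main() {
	// Same format string as platformOSDescription, with sample values. Note
	// the trailing space appended to a non-empty DisplayVersion above.
	fmt.Printf("%s %s(%s) [Version %s.%s.%s.%s]\n",
		"Windows 10 Pro", "22H2 ", "2009", "10", "0", "19045", "4291")
	// Windows 10 Pro 22H2 (2009) [Version 10.0.19045.4291]
}
```
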
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 000000000..085fe68fd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,173 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type (
+ pidProvider func() int
+ executablePathProvider func() (string, error)
+ commandArgsProvider func() []string
+ ownerProvider func() (*user.User, error)
+ runtimeNameProvider func() string
+ runtimeVersionProvider func() string
+ runtimeOSProvider func() string
+ runtimeArchProvider func() string
+)
+
+var (
+ defaultPidProvider pidProvider = os.Getpid
+ defaultExecutablePathProvider executablePathProvider = os.Executable
+ defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
+ defaultOwnerProvider ownerProvider = user.Current
+ defaultRuntimeNameProvider runtimeNameProvider = func() string {
+ if runtime.Compiler == "gc" {
+ return "go"
+ }
+ return runtime.Compiler
+ }
+ defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+ defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
+ defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
+)
+
+var (
+ pid = defaultPidProvider
+ executablePath = defaultExecutablePathProvider
+ commandArgs = defaultCommandArgsProvider
+ owner = defaultOwnerProvider
+ runtimeName = defaultRuntimeNameProvider
+ runtimeVersion = defaultRuntimeVersionProvider
+ runtimeOS = defaultRuntimeOSProvider
+ runtimeArch = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+ setOSProviders(
+ defaultPidProvider,
+ defaultExecutablePathProvider,
+ defaultCommandArgsProvider,
+ )
+}
+
+func setOSProviders(
+ pidProvider pidProvider,
+ executablePathProvider executablePathProvider,
+ commandArgsProvider commandArgsProvider,
+) {
+ pid = pidProvider
+ executablePath = executablePathProvider
+ commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+ setRuntimeProviders(
+ defaultRuntimeNameProvider,
+ defaultRuntimeVersionProvider,
+ defaultRuntimeOSProvider,
+ defaultRuntimeArchProvider,
+ )
+}
+
+func setRuntimeProviders(
+ runtimeNameProvider runtimeNameProvider,
+ runtimeVersionProvider runtimeVersionProvider,
+ runtimeOSProvider runtimeOSProvider,
+ runtimeArchProvider runtimeArchProvider,
+) {
+ runtimeName = runtimeNameProvider
+ runtimeVersion = runtimeVersionProvider
+ runtimeOS = runtimeOSProvider
+ runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+ setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+ owner = ownerProvider
+}
+
+type (
+ processPIDDetector struct{}
+ processExecutableNameDetector struct{}
+ processExecutablePathDetector struct{}
+ processCommandArgsDetector struct{}
+ processOwnerDetector struct{}
+ processRuntimeNameDetector struct{}
+ processRuntimeVersionDetector struct{}
+ processRuntimeDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ executableName := filepath.Base(commandArgs()[0])
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+ executablePath, err := executablePath()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+ owner, err := owner()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ runtimeDescription := fmt.Sprintf(
+ "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ProcessRuntimeDescription(runtimeDescription),
+ ), nil
+}
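
A usage sketch for the process detectors above, assuming the package's `resource.WithProcess` option (not shown in this diff), which enables all of them at once:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithProcess enables the detectors above: PID, executable name and path,
	// command arguments, owner, and runtime name/version/description.
	res, err := resource.New(context.Background(), resource.WithProcess())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.String())
}
```
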
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 000000000..ad4b50df4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,294 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata is exposed. Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`). The `nil` value is equivalent to an empty
+// Resource.
+type Resource struct {
+ attrs attribute.Set
+ schemaURL string
+}
+
+var (
+ defaultResource *Resource
+ defaultResourceOnce sync.Once
+)
+
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
+
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+ cfg := config{}
+ for _, opt := range opts {
+ cfg = opt.apply(cfg)
+ }
+
+ r := &Resource{schemaURL: cfg.schemaURL}
+ return r, detect(ctx, r, cfg.detectors)
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items, those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+ resource := NewSchemaless(attrs...)
+ resource.schemaURL = schemaURL
+ return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items, those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+ if len(attrs) == 0 {
+ return &Resource{}
+ }
+
+ // Ensure attributes comply with the specification:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
+ s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+ return kv.Valid()
+ })
+
+ // If attrs only contains invalid entries do not allocate a new resource.
+ if s.Len() == 0 {
+ return &Resource{}
+ }
+
+ return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources,
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() interface{} {
+ return struct {
+ Attributes attribute.Set
+ SchemaURL string
+ }{
+ Attributes: r.attrs,
+ SchemaURL: r.schemaURL,
+ }
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+ if r == nil {
+ return ""
+ }
+ return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.Iter()
+}
+
+// Equal returns true when a Resource is equivalent to this Resource.
+func (r *Resource) Equal(eq *Resource) bool {
+ if r == nil {
+ r = Empty()
+ }
+ if eq == nil {
+ eq = Empty()
+ }
+ return r.Equivalent() == eq.Equivalent()
+}
+
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
+//
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
+func Merge(a, b *Resource) (*Resource, error) {
+ if a == nil && b == nil {
+ return Empty(), nil
+ }
+ if a == nil {
+ return b, nil
+ }
+ if b == nil {
+ return a, nil
+ }
+
+ // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+ // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+ mi := attribute.NewMergeIterator(b.Set(), a.Set())
+ combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for mi.Next() {
+ combine = append(combine, mi.Attribute())
+ }
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
+}
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+ return &Resource{}
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+ defaultResourceOnce.Do(func() {
+ var err error
+ defaultDetectors := []Detector{
+ defaultServiceNameDetector{},
+ fromEnv{},
+ telemetrySDK{},
+ }
+ if x.Resource.Enabled() {
+ defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
+ }
+ defaultResource, err = Detect(
+ context.Background(),
+ defaultDetectors...,
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ // If Detect did not return a valid resource, fall back to emptyResource.
+ if defaultResource == nil {
+ defaultResource = &Resource{}
+ }
+ })
+ return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+ detector := &fromEnv{}
+ resource, err := detector.Detect(context.Background())
+ if err != nil {
+ otel.Handle(err)
+ }
+ return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+ return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+ if r == nil {
+ r = Empty()
+ }
+ return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+ if r == nil {
+ return 0
+ }
+ return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(enc)
+}
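
The merge rules documented on `Merge` can be exercised directly; the schema URLs and attribute keys below are hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewWithAttributes("https://example.com/schema/1",
		attribute.String("k", "from-a"), attribute.String("only.a", "1"))
	b := resource.NewWithAttributes("https://example.com/schema/2",
		attribute.String("k", "from-b"))

	merged, err := resource.Merge(a, b)
	// b wins for the common key "k"; the non-empty schema URLs conflict, so
	// merged carries an empty schema URL and err wraps ErrSchemaURLConflict.
	// It is up to the caller whether to use the returned Resource anyway.
	fmt.Println(merged.String())
	fmt.Println(errors.Is(err, resource.ErrSchemaURLConflict)) // true
}
```
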
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
new file mode 100644
index 000000000..f2936e143
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
@@ -0,0 +1,3 @@
+# SDK Trace
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 000000000..ccc97e1b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,414 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+ DefaultMaxQueueSize = 2048
+ DefaultScheduleDelay = 5000
+ DefaultExportTimeout = 30000
+ DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+ // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+ // queue gets full, it drops the spans. Use BlockOnQueueFull to change this behavior.
+ // The default value of MaxQueueSize is 2048.
+ MaxQueueSize int
+
+ // BatchTimeout is the maximum duration for constructing a batch. Processor
+ // forcefully sends available spans when timeout is reached.
+ // The default value of BatchTimeout is 5000 msec.
+ BatchTimeout time.Duration
+
+ // ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+ // is reached, the export will be cancelled.
+ // The default value of ExportTimeout is 30000 msec.
+ ExportTimeout time.Duration
+
+ // MaxExportBatchSize is the maximum number of spans to process in a single batch.
+ // If there are more than one batch worth of spans then it processes multiple batches
+ // of spans one batch after the other without any delay.
+ // The default value of MaxExportBatchSize is 512.
+ MaxExportBatchSize int
+
+ // BlockOnQueueFull, when set to true, blocks the OnEnd() and OnStart()
+ // methods if the queue is full.
+ // The blocking option should be used carefully, as it can severely affect
+ // the performance of an application.
+ BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a SpanExporter when complete.
+type batchSpanProcessor struct {
+ e SpanExporter
+ o BatchSpanProcessorOptions
+
+ queue chan ReadOnlySpan
+ dropped uint32
+
+ batch []ReadOnlySpan
+ batchMutex sync.Mutex
+ timer *time.Timer
+ stopWait sync.WaitGroup
+ stopOnce sync.Once
+ stopCh chan struct{}
+ stopped atomic.Bool
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+ maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+ maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+ if maxExportBatchSize > maxQueueSize {
+ if DefaultMaxExportBatchSize > maxQueueSize {
+ maxExportBatchSize = maxQueueSize
+ } else {
+ maxExportBatchSize = DefaultMaxExportBatchSize
+ }
+ }
+
+ o := BatchSpanProcessorOptions{
+ BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+ ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+ MaxQueueSize: maxQueueSize,
+ MaxExportBatchSize: maxExportBatchSize,
+ }
+ for _, opt := range options {
+ opt(&o)
+ }
+ bsp := &batchSpanProcessor{
+ e: exporter,
+ o: o,
+ batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+ timer: time.NewTimer(o.BatchTimeout),
+ queue: make(chan ReadOnlySpan, o.MaxQueueSize),
+ stopCh: make(chan struct{}),
+ }
+
+ bsp.stopWait.Add(1)
+ go func() {
+ defer bsp.stopWait.Done()
+ bsp.processQueue()
+ bsp.drainQueue()
+ }()
+
+ return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+ // Do not enqueue spans after Shutdown.
+ if bsp.stopped.Load() {
+ return
+ }
+
+ // Do not enqueue spans if we are just going to drop them.
+ if bsp.e == nil {
+ return
+ }
+ bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ bsp.stopOnce.Do(func() {
+ bsp.stopped.Store(true)
+ wait := make(chan struct{})
+ go func() {
+ close(bsp.stopCh)
+ bsp.stopWait.Wait()
+ if bsp.e != nil {
+ if err := bsp.e.Shutdown(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ close(wait)
+ }()
+ // Wait until the wait group is done or the context is cancelled
+ select {
+ case <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ })
+ return err
+}
+
+type forceFlushSpan struct {
+ ReadOnlySpan
+ flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+ return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+ // Interrupt if context is already canceled.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Do nothing after Shutdown.
+ if bsp.stopped.Load() {
+ return nil
+ }
+
+ var err error
+ if bsp.e != nil {
+ flushCh := make(chan struct{})
+ if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+ select {
+ case <-bsp.stopCh:
+ // The batchSpanProcessor is Shutdown.
+ return nil
+ case <-flushCh:
+ // Processed any items in queue prior to ForceFlush being called
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ wait := make(chan error)
+ go func() {
+ wait <- bsp.exportSpans(ctx)
+ close(wait)
+ }()
+ // Wait until the export is finished or the context is cancelled/timed out
+ select {
+ case err = <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ }
+ return err
+}
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxQueueSize = size
+ }
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxExportBatchSize = size
+ }
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BatchTimeout = delay
+ }
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.ExportTimeout = timeout
+ }
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BlockOnQueueFull = true
+ }
+}
+
+// exportSpans is a subroutine of processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+ bsp.timer.Reset(bsp.o.BatchTimeout)
+
+ bsp.batchMutex.Lock()
+ defer bsp.batchMutex.Unlock()
+
+ if bsp.o.ExportTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
+ defer cancel()
+ }
+
+ if l := len(bsp.batch); l > 0 {
+ global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+ // A new batch is always created after exporting, even if the batch failed to be exported.
+ //
+ // It is up to the exporter to implement any type of retry logic if a batch is failing
+ // to be exported, since it is specific to the protocol and backend being sent to.
+ clear(bsp.batch) // Erase elements to let GC collect objects
+ bsp.batch = bsp.batch[:0]
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processQueue removes spans from the `queue` channel until processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+ defer bsp.timer.Stop()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case <-bsp.stopCh:
+ return
+ case <-bsp.timer.C:
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ case sd := <-bsp.queue:
+ if ffs, ok := sd.(forceFlushSpan); ok {
+ close(ffs.flushed)
+ continue
+ }
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+ if shouldExport {
+ if !bsp.timer.Stop() {
+ // Handle both GODEBUG=asynctimerchan=[0|1] properly.
+ select {
+ case <-bsp.timer.C:
+ default:
+ }
+ }
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ }
+ }
+}
+
+// drainQueue awaits any callers that had added to bsp.stopWait
+// to finish their enqueue, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case sd := <-bsp.queue:
+ if _, ok := sd.(forceFlushSpan); ok {
+ // Ignore flush requests as they are not valid spans.
+ continue
+ }
+
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+
+ if shouldExport {
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ default:
+ // There are no more enqueued spans. Make final export.
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ return
+ }
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+ ctx := context.TODO()
+ if bsp.o.BlockOnQueueFull {
+ bsp.enqueueBlockOnQueueFull(ctx, sd)
+ } else {
+ bsp.enqueueDrop(ctx, sd)
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ case <-ctx.Done():
+ return false
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ default:
+ atomic.AddUint32(&bsp.dropped, 1)
+ }
+ return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ SpanExporter SpanExporter
+ Config BatchSpanProcessorOptions
+ }{
+ Type: "BatchSpanProcessor",
+ SpanExporter: bsp.e,
+ Config: bsp.o,
+ }
+}
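
A minimal sketch wiring the processor into a TracerProvider; the `stdouttrace` exporter lives in a separate module and is used here only as a convenient stand-in for a real exporter:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	exp, err := stdouttrace.New()
	if err != nil {
		log.Fatal(err)
	}
	bsp := sdktrace.NewBatchSpanProcessor(exp,
		sdktrace.WithMaxQueueSize(4096),          // buffer up to 4096 ended spans
		sdktrace.WithBatchTimeout(2*time.Second), // flush at least every 2s
		sdktrace.WithBlocking(),                  // block instead of dropping
	)
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.End() // enqueued; exported by the processor's background goroutine
}
```
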
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 000000000..1f60524e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 000000000..60a7ed134
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Event represents something that happened during a Span's lifetime.
+type Event struct {
+ // Name is the name of this event
+ Name string
+
+ // Attributes describe the aspects of the event.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+
+ // Time at which this event was recorded.
+ Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 000000000..8c308dd60
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "slices"
+ "sync"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue[T any] struct {
+ queue []T
+ capacity int
+ droppedCount int
+ logDroppedMsg string
+ logDroppedOnce sync.Once
+}
+
+func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue[Event]{
+ capacity: capacity,
+ logDroppedMsg: "limit reached: dropping trace.Event",
+ }
+}
+
+func newEvictedQueueLink(capacity int) evictedQueue[Link] {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue[Link]{
+ capacity: capacity,
+ logDroppedMsg: "limit reached: dropping trace.Link",
+ }
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue[T]) add(value T) {
+ if eq.capacity == 0 {
+ eq.droppedCount++
+ eq.logDropped()
+ return
+ }
+
+ if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+ // Drop first-in while avoiding allocating more capacity to eq.queue.
+ copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+ eq.queue = eq.queue[:eq.capacity-1]
+ eq.droppedCount++
+ eq.logDropped()
+ }
+ eq.queue = append(eq.queue, value)
+}
+
+func (eq *evictedQueue[T]) logDropped() {
+ eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
+}
+
+// copy returns a copy of the evictedQueue.
+func (eq *evictedQueue[T]) copy() []T {
+ return slices.Clone(eq.queue)
+}
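
The eviction rule in `add` can be isolated as a small standalone sketch; `addCapped` is a hypothetical helper, not part of the package:

```go
package main

import "fmt"

// addCapped mirrors evictedQueue.add: once at capacity, the oldest element is
// shifted out in place so the backing array never grows past capacity.
func addCapped[T any](q []T, capacity int, v T) ([]T, bool) {
	if capacity == 0 {
		return q, true // nothing is ever stored; the value is dropped
	}
	dropped := false
	if capacity > 0 && len(q) == capacity {
		copy(q[:capacity-1], q[1:]) // drop first-in without reallocating
		q = q[:capacity-1]
		dropped = true
	}
	return append(q, v), dropped
}

func main() {
	var q []int
	for i := 1; i <= 5; i++ {
		var d bool
		q, d = addCapped(q, 3, i)
		fmt.Println(q, "dropped:", d)
	}
	// Ends with [3 4 5]: values 1 and 2 were evicted.
}
```
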
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 000000000..925bcf993
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ crand "crypto/rand"
+ "encoding/binary"
+ "math/rand"
+ "sync"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // NewIDs returns a new trace and span ID.
+ NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // NewSpanID returns an ID for a new span in the trace with traceID.
+ NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+ sync.Mutex
+ randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+ gen.Lock()
+ defer gen.Unlock()
+ sid := trace.SpanID{}
+ for {
+ _, _ = gen.randSource.Read(sid[:])
+ if sid.IsValid() {
+ break
+ }
+ }
+ return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+ gen.Lock()
+ defer gen.Unlock()
+ tid := trace.TraceID{}
+ sid := trace.SpanID{}
+ for {
+ _, _ = gen.randSource.Read(tid[:])
+ if tid.IsValid() {
+ break
+ }
+ }
+ for {
+ _, _ = gen.randSource.Read(sid[:])
+ if sid.IsValid() {
+ break
+ }
+ }
+ return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+ gen := &randomIDGenerator{}
+ var rngSeed int64
+ _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+ gen.randSource = rand.New(rand.NewSource(rngSeed))
+ return gen
+}
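
Custom generators plug in through the provider options. A sketch of a hypothetical crypto/rand-backed IDGenerator, installed via `sdktrace.WithIDGenerator`; unlike the default generator above, this sketch omits the retry loop that guarantees non-zero (valid) IDs:

```go
package main

import (
	"context"
	crand "crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// cryptoIDGenerator draws IDs directly from crypto/rand instead of the
// seeded math/rand source used by the default generator.
type cryptoIDGenerator struct{}

func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	var sid trace.SpanID
	_, _ = crand.Read(tid[:])
	_, _ = crand.Read(sid[:])
	return tid, sid
}

func (cryptoIDGenerator) NewSpanID(ctx context.Context, _ trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	_, _ = crand.Read(sid[:])
	return sid
}

func main() {
	// Installed via the provider option; the default generator applies
	// when WithIDGenerator is omitted.
	_ = sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
}
```
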
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 000000000..c03bdc90f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext trace.SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 000000000..185aa7c08
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,494 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+ defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration options for a TracerProvider.
+type tracerProviderConfig struct {
+ // processors contains the collection of SpanProcessors that form the
+ // processing pipeline for spans in the trace signal.
+ // SpanProcessors registered with a TracerProvider are called at the start
+ // and end of a Span's lifecycle, in the order they are registered.
+ processors []SpanProcessor
+
+ // sampler is the default sampler used when creating new spans.
+ sampler Sampler
+
+ // idGenerator is used to generate all Span and Trace IDs when needed.
+ idGenerator IDGenerator
+
+ // spanLimits defines the attribute, event, and link limits for spans.
+ spanLimits SpanLimits
+
+ // resource contains attributes representing an entity that produces telemetry.
+ resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+ return struct {
+ SpanProcessors []SpanProcessor
+ SamplerType string
+ IDGeneratorType string
+ SpanLimits SpanLimits
+ Resource *resource.Resource
+ }{
+ SpanProcessors: cfg.processors,
+ SamplerType: fmt.Sprintf("%T", cfg.sampler),
+ IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+ SpanLimits: cfg.spanLimits,
+ Resource: cfg.resource,
+ }
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+ embedded.TracerProvider
+
+ mu sync.Mutex
+ namedTracer map[instrumentation.Scope]*tracer
+ spanProcessors atomic.Pointer[spanProcessorStates]
+
+ isShutdown atomic.Bool
+
+ // These fields are not protected by the lock mu. They are assumed to be
+ // immutable after creation of the TracerProvider.
+ sampler Sampler
+ idGenerator IDGenerator
+ spanLimits SpanLimits
+ resource *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+// - a ParentBased(AlwaysSample) Sampler
+// - a random number IDGenerator
+// - the resource.Default() Resource
+// - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+ o := tracerProviderConfig{
+ spanLimits: NewSpanLimits(),
+ }
+ o = applyTracerProviderEnvConfigs(o)
+
+ for _, opt := range opts {
+ o = opt.apply(o)
+ }
+
+ o = ensureValidTracerProviderConfig(o)
+
+ tp := &TracerProvider{
+ namedTracer: make(map[instrumentation.Scope]*tracer),
+ sampler: o.sampler,
+ idGenerator: o.idGenerator,
+ spanLimits: o.spanLimits,
+ resource: o.resource,
+ }
+ global.Info("TracerProvider created", "config", o)
+
+ spss := make(spanProcessorStates, 0, len(o.processors))
+ for _, sp := range o.processors {
+ spss = append(spss, newSpanProcessorState(sp))
+ }
+ tp.spanProcessors.Store(&spss)
+
+ return tp
+}
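+
+// A typical construction wires an exporter-backed processor, a sampler, and a
+// resource, and defers Shutdown. A minimal sketch, assuming the stdouttrace
+// exporter (any SpanExporter works):
+//
+//	exp, err := stdouttrace.New()
+//	if err != nil {
+//		panic(err)
+//	}
+//	tp := sdktrace.NewTracerProvider(
+//		sdktrace.WithBatcher(exp),
+//		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
+//		sdktrace.WithResource(resource.Default()),
+//	)
+//	defer func() { _ = tp.Shutdown(context.Background()) }()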
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist it is created, otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, the default tracer name is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...)
+ }
+ c := trace.NewTracerConfig(opts...)
+ if name == "" {
+ name = defaultTracerName
+ }
+ is := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ Attributes: c.InstrumentationAttributes(),
+ }
+
+ t, ok := func() (trace.Tracer, bool) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
+ // after the first check above but before we acquired the mutex.
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...), true
+ }
+ t, ok := p.namedTracer[is]
+ if !ok {
+ t = &tracer{
+ provider: p,
+ instrumentationScope: is,
+ }
+ p.namedTracer[is] = t
+ }
+ return t, ok
+ }()
+ if !ok {
+ // This code is outside the mutex to not hold the lock while calling third party logging code:
+ // - That code may do slow things like I/O, which would prolong the duration the lock is held,
+ // slowing down all tracing consumers.
+ // - Logging code may be instrumented with tracing and deadlock because it could try
+ // acquiring the same non-reentrant mutex.
+ global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
+ }
+ return t
+}
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+
+ current := p.getSpanProcessors()
+ newSPS := make(spanProcessorStates, 0, len(current)+1)
+ newSPS = append(newSPS, current...)
+ newSPS = append(newSPS, newSpanProcessorState(sp))
+ p.spanProcessors.Store(&newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ old := p.getSpanProcessors()
+ if len(old) == 0 {
+ return
+ }
+ spss := make(spanProcessorStates, len(old))
+ copy(spss, old)
+
+ // stop the span processor if it is started and remove it from the list
+ var stopOnce *spanProcessorState
+ var idx int
+ for i, sps := range spss {
+ if sps.sp == sp {
+ stopOnce = sps
+ idx = i
+ }
+ }
+ if stopOnce != nil {
+ stopOnce.state.Do(func() {
+ if err := sp.Shutdown(context.Background()); err != nil {
+ otel.Handle(err)
+ }
+ })
+ }
+ if len(spss) > 1 {
+ copy(spss[idx:], spss[idx+1:])
+ }
+ spss[len(spss)-1] = nil
+ spss = spss[:len(spss)-1]
+
+ p.spanProcessors.Store(&spss)
+}
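+
+// Processors can be added and removed at runtime. A minimal sketch, assuming
+// exp is an existing SpanExporter:
+//
+//	bsp := sdktrace.NewBatchSpanProcessor(exp)
+//	tp.RegisterSpanProcessor(bsp)
+//	// ...
+//	tp.UnregisterSpanProcessor(bsp) // shuts bsp down and removes it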
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+ spss := p.getSpanProcessors()
+ if len(spss) == 0 {
+ return nil
+ }
+
+ for _, sps := range spss {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if err := sps.sp.ForceFlush(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+ // This check prevents deadlocks in case of recursive shutdown.
+ if p.isShutdown.Load() {
+ return nil
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown has already been done concurrently.
+ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+ return nil
+ }
+
+ var retErr error
+ for _, sps := range p.getSpanProcessors() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var err error
+ sps.state.Do(func() {
+ err = sps.sp.Shutdown(ctx)
+ })
+ if err != nil {
+ if retErr == nil {
+ retErr = err
+ } else {
+ // Poor man's list of errors
+ retErr = fmt.Errorf("%w; %w", retErr, err)
+ }
+ }
+ }
+ p.spanProcessors.Store(&spanProcessorStates{})
+ return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+ return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+ apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+ return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computation resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+ return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+ return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.processors = append(cfg.processors, sp)
+ return cfg
+ })
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ var err error
+ cfg.resource, err = resource.Merge(resource.Environment(), r)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return cfg
+ })
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if g != nil {
+ cfg.idGenerator = g
+ }
+ return cfg
+ })
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not
+// used, and the sampler is either not configured through environment
+// variables or the environment contains invalid/unsupported configuration,
+// the TracerProvider will use a ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if s != nil {
+ cfg.sampler = s
+ }
+ return cfg
+ })
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+ if sl.AttributeValueLengthLimit <= 0 {
+ sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+ }
+ if sl.AttributeCountLimit <= 0 {
+ sl.AttributeCountLimit = DefaultAttributeCountLimit
+ }
+ if sl.EventCountLimit <= 0 {
+ sl.EventCountLimit = DefaultEventCountLimit
+ }
+ if sl.AttributePerEventCountLimit <= 0 {
+ sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+ }
+ if sl.LinkCountLimit <= 0 {
+ sl.LinkCountLimit = DefaultLinkCountLimit
+ }
+ if sl.AttributePerLinkCountLimit <= 0 {
+ sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+ }
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = sl
+ return cfg
+ })
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits, and setting a limit to
+// a negative value means that resource is unlimited. Consequently, the
+// zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = limits
+ return cfg
+ })
+}
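+
+// A minimal sketch of explicit limits (the values are illustrative);
+// NewSpanLimits gives environment-aware defaults to start from:
+//
+//	limits := sdktrace.NewSpanLimits()
+//	limits.AttributeCountLimit = 64
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))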
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+ for _, opt := range tracerProviderOptionsFromEnv() {
+ cfg = opt.apply(cfg)
+ }
+
+ return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+ var opts []TracerProviderOption
+
+ sampler, err := samplerFromEnv()
+ if err != nil {
+ otel.Handle(err)
+ }
+
+ if sampler != nil {
+ opts = append(opts, WithSampler(sampler))
+ }
+
+ return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+ if cfg.sampler == nil {
+ cfg.sampler = ParentBased(AlwaysSample())
+ }
+ if cfg.idGenerator == nil {
+ cfg.idGenerator = defaultIDGenerator()
+ }
+ if cfg.resource == nil {
+ cfg.resource = resource.Default()
+ }
+ return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 000000000..9b672a1d7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "errors"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ tracesSamplerKey = "OTEL_TRACES_SAMPLER"
+ tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+ samplerAlwaysOn = "always_on"
+ samplerAlwaysOff = "always_off"
+ samplerTraceIDRatio = "traceidratio"
+ samplerParentBasedAlwaysOn = "parentbased_always_on"
+ samplerParentBasedAlwaysOff = "parentbased_always_off"
+ samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+ return "unsupported sampler: " + string(e)
+}
+
+var (
+ errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0")
+ errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+ parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+ return "parsing sampler argument: " + e.parseErr.Error()
+}
+
+func (e samplerArgParseError) Unwrap() error {
+ return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+ sampler, ok := os.LookupEnv(tracesSamplerKey)
+ if !ok {
+ return nil, nil
+ }
+
+ sampler = strings.ToLower(strings.TrimSpace(sampler))
+ samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+ samplerArg = strings.TrimSpace(samplerArg)
+
+ switch sampler {
+ case samplerAlwaysOn:
+ return AlwaysSample(), nil
+ case samplerAlwaysOff:
+ return NeverSample(), nil
+ case samplerTraceIDRatio:
+ if !hasSamplerArg {
+ return TraceIDRatioBased(1.0), nil
+ }
+ return parseTraceIDRatio(samplerArg)
+ case samplerParentBasedAlwaysOn:
+ return ParentBased(AlwaysSample()), nil
+ case samplerParentBasedAlwaysOff:
+ return ParentBased(NeverSample()), nil
+ case samplerParentBasedTraceIDRatio:
+ if !hasSamplerArg {
+ return ParentBased(TraceIDRatioBased(1.0)), nil
+ }
+ ratio, err := parseTraceIDRatio(samplerArg)
+ return ParentBased(ratio), err
+ default:
+ return nil, errUnsupportedSampler(sampler)
+ }
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+ v, err := strconv.ParseFloat(arg, 64)
+ if err != nil {
+ return TraceIDRatioBased(1.0), samplerArgParseError{err}
+ }
+ if v < 0.0 {
+ return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+ }
+ if v > 1.0 {
+ return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+ }
+
+ return TraceIDRatioBased(v), nil
+}
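+
+// samplerFromEnv is consulted once, when NewTracerProvider builds its
+// configuration. A minimal sketch of configuring a ratio sampler purely from
+// the environment (the 0.25 value is illustrative):
+//
+//	// Set before the TracerProvider is constructed:
+//	//   OTEL_TRACES_SAMPLER=parentbased_traceidratio
+//	//   OTEL_TRACES_SAMPLER_ARG=0.25
+//	tp := sdktrace.NewTracerProvider() // uses ParentBased(TraceIDRatioBased(0.25))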
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 000000000..ebb6df6c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ShouldSample returns a SamplingResult based on a decision made from the
+ // passed parameters.
+ ShouldSample(parameters SamplingParameters) SamplingResult
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Description returns information describing the Sampler.
+ Description() string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+ ParentContext context.Context
+ TraceID trace.TraceID
+ Name string
+ Kind trace.SpanKind
+ Attributes []attribute.KeyValue
+ Links []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+ // Drop will not record the span and all attributes/events will be dropped.
+ Drop SamplingDecision = iota
+
+ // RecordOnly indicates the span's `IsRecording() == true`, but the
+ // `Sampled` flag *must not* be set.
+ RecordOnly
+
+ // RecordAndSample indicates the span's `IsRecording() == true` and the
+ // `Sampled` flag *must* be set.
+ RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+ Decision SamplingDecision
+ Attributes []attribute.KeyValue
+ Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+ traceIDUpperBound uint64
+ description string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+ if x < ts.traceIDUpperBound {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: psc.TraceState(),
+ }
+ }
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: psc.TraceState(),
+ }
+}
+
+func (ts traceIDRatioSampler) Description() string {
+ return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+ if fraction >= 1 {
+ return AlwaysSample()
+ }
+
+ if fraction <= 0 {
+ fraction = 0
+ }
+
+ return &traceIDRatioSampler{
+ traceIDUpperBound: uint64(fraction * (1 << 63)),
+ description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+ }
+}
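+
+// The decision above is a threshold test: x = binary.BigEndian.Uint64(traceID[8:16]) >> 1
+// is uniformly distributed over [0, 2^63), and a span is sampled iff
+// x < fraction*2^63. For example, fraction = 0.25 yields
+// traceIDUpperBound = 2^61, so about one in four trace IDs fall below the bound.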
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOnSampler) Description() string {
+ return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+ return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOffSampler) Description() string {
+ return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+ return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently
+// based on the parent of the span. If the span has no parent,
+// the decorated sampler is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+ return parentBased{
+ root: root,
+ config: configureSamplersForParentBased(samplers),
+ }
+}
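+
+// A common composition samples a fraction of new root traces while honoring
+// the parent's decision otherwise. A minimal sketch (the 0.1 ratio is
+// illustrative):
+//
+//	s := sdktrace.ParentBased(
+//		sdktrace.TraceIDRatioBased(0.1),
+//		sdktrace.WithRemoteParentSampled(sdktrace.AlwaysSample()),
+//		sdktrace.WithRemoteParentNotSampled(sdktrace.NeverSample()),
+//	)
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(s))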
+
+type parentBased struct {
+ root Sampler
+ config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+ c := samplerConfig{
+ remoteParentSampled: AlwaysSample(),
+ remoteParentNotSampled: NeverSample(),
+ localParentSampled: AlwaysSample(),
+ localParentNotSampled: NeverSample(),
+ }
+
+ for _, so := range samplers {
+ c = so.apply(c)
+ }
+
+ return c
+}
+
+// samplerConfig is a group of options for parentBased sampler.
+type samplerConfig struct {
+ remoteParentSampled, remoteParentNotSampled Sampler
+ localParentSampled, localParentNotSampled Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+ apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentSampled = o.s
+ return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentNotSampled = o.s
+ return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+ s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentSampled = o.s
+ return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentNotSampled = o.s
+ return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ if psc.IsSampled() {
+ return pb.config.remoteParentSampled.ShouldSample(p)
+ }
+ return pb.config.remoteParentNotSampled.ShouldSample(p)
+ }
+
+ if psc.IsSampled() {
+ return pb.config.localParentSampled.ShouldSample(p)
+ }
+ return pb.config.localParentNotSampled.ShouldSample(p)
+ }
+ return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+ return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+ "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+ pb.root.Description(),
+ pb.config.remoteParentSampled.Description(),
+ pb.config.remoteParentNotSampled.Description(),
+ pb.config.localParentSampled.Description(),
+ pb.config.localParentNotSampled.Description(),
+ )
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 000000000..554111bb4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed spans to a SpanExporter immediately.
+type simpleSpanProcessor struct {
+ exporterMu sync.Mutex
+ exporter SpanExporter
+ stopOnce sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have a high computation
+// resource usage overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+ ssp := &simpleSpanProcessor{
+ exporter: exporter,
+ }
+ global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+ return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+ ssp.exporterMu.Lock()
+ defer ssp.exporterMu.Unlock()
+
+ if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+ if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+ otel.Handle(err)
+ }
+ }
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ ssp.stopOnce.Do(func() {
+ stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+ done := make(chan error)
+ return done, func() { done <- exp.Shutdown(ctx) }
+ }
+
+ // The exporter field of the simpleSpanProcessor needs to be zeroed to
+ // signal it is shut down, meaning all subsequent calls to OnEnd will
+ // be gracefully ignored. This needs to be done synchronously to avoid
+ // any race condition.
+ //
+ // A closure is used to keep reference to the exporter and then the
+ // field is zeroed. This ensures the simpleSpanProcessor is shut down
+ // before the exporter. This order is important as it avoids a potential
+ // deadlock. If the exporter shut down operation generates a span, that
+ // span would need to be exported. Meaning, OnEnd would be called and
+ // try acquiring the lock that is held here.
+ ssp.exporterMu.Lock()
+ done, shutdown := stopFunc(ssp.exporter)
+ ssp.exporter = nil
+ ssp.exporterMu.Unlock()
+
+ go shutdown()
+
+ // Wait for the exporter to shut down or the deadline to expire.
+ select {
+ case err = <-done:
+ case <-ctx.Done():
+ // It is possible for the exporter to have immediately shut down and
+ // the context to be done simultaneously. In that case this outer
+ // select statement will randomly choose a case. This will result in
+ // a different returned error for similar scenarios. Instead, double
+ // check if the exporter shut down at the same time and return that
+ // error if so. This will ensure consistency as well as ensure
+ // the caller knows the exporter shut down successfully (they can
+ // already determine if the deadline is expired given they passed
+ // the context).
+ select {
+ case err = <-done:
+ default:
+ err = ctx.Err()
+ }
+ }
+ })
+ return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+ return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Exporter SpanExporter
+ }{
+ Type: "SimpleSpanProcessor",
+ Exporter: ssp.exporter,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 000000000..d511d0f27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []Event
+ links []Link
+ status Status
+ childSpanCount int
+ droppedAttributeCount int
+ droppedEventCount int
+ droppedLinkCount int
+ resource *resource.Resource
+ instrumentationScope instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+ return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+ return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+ return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+ return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+ return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+ return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+ return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+ return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+ return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+ return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+ return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+ return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+ return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+ return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+ return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+ return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 000000000..8f4fc3850
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,937 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "runtime"
+ rt "runtime/trace"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+ // Name returns the name of the span.
+ Name() string
+ // SpanContext returns the unique SpanContext that identifies the span.
+ SpanContext() trace.SpanContext
+ // Parent returns the unique SpanContext that identifies the parent of the
+ // span if one exists. If the span has no parent the returned SpanContext
+ // will be invalid.
+ Parent() trace.SpanContext
+ // SpanKind returns the role the span plays in a Trace.
+ SpanKind() trace.SpanKind
+ // StartTime returns the time the span started recording.
+ StartTime() time.Time
+ // EndTime returns the time the span stopped recording. It will be zero if
+ // the span has not ended.
+ EndTime() time.Time
+ // Attributes returns the defining attributes of the span.
+ // The order of the returned attributes is not guaranteed to be stable across invocations.
+ Attributes() []attribute.KeyValue
+ // Links returns all the links the span has to other spans.
+ Links() []Link
+ // Events returns all the events that occurred within the span's
+ // lifetime.
+ Events() []Event
+ // Status returns the span's status.
+ Status() Status
+ // InstrumentationScope returns information about the instrumentation
+ // scope that created the span.
+ InstrumentationScope() instrumentation.Scope
+ // InstrumentationLibrary returns information about the instrumentation
+ // library that created the span.
+ // Deprecated: please use InstrumentationScope instead.
+ InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ // Resource returns information about the entity that produced the span.
+ Resource() *resource.Resource
+ // DroppedAttributes returns the number of attributes dropped by the span
+ // due to limits being reached.
+ DroppedAttributes() int
+ // DroppedLinks returns the number of links dropped by the span due to
+ // limits being reached.
+ DroppedLinks() int
+ // DroppedEvents returns the number of events dropped by the span due to
+ // limits being reached.
+ DroppedEvents() int
+ // ChildSpanCount returns the count of spans that consider the span a
+ // direct parent.
+ ChildSpanCount() int
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+ trace.Span
+ ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+ embedded.Span
+
+ // mu protects the contents of this span.
+ mu sync.Mutex
+
+ // parent holds the parent span of this span as a trace.SpanContext.
+ parent trace.SpanContext
+
+ // spanKind represents the kind of this span as a trace.SpanKind.
+ spanKind trace.SpanKind
+
+ // name is the name of this span.
+ name string
+
+ // startTime is the time at which this span was started.
+ startTime time.Time
+
+ // endTime is the time at which this span was ended. It contains the zero
+ // value of time.Time until the span is ended.
+ endTime time.Time
+
+ // status is the status of this span.
+ status Status
+
+ // childSpanCount holds the number of child spans created for this span.
+ childSpanCount int
+
+ // spanContext holds the SpanContext of this span.
+ spanContext trace.SpanContext
+
+ // attributes is a collection of user provided key/values. The collection
+ // is constrained by a configurable maximum held by the parent
+ // TracerProvider. Attributes the user attempts to add after this maximum
+ // is reached are dropped. The number of dropped attributes is tracked and
+ // reported in the ReadOnlySpan exported when the span ends.
+ attributes []attribute.KeyValue
+ droppedAttributes int
+ logDropAttrsOnce sync.Once
+
+ // events are stored in FIFO queue capped by configured limit.
+ events evictedQueue[Event]
+
+ // links are stored in FIFO queue capped by configured limit.
+ links evictedQueue[Link]
+
+ // executionTracerTaskEnd ends the execution tracer span.
+ executionTracerTaskEnd func()
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+}
+
+var (
+ _ ReadWriteSpan = (*recordingSpan)(nil)
+ _ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.isRecording()
+}
+
+// isRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) isRecording() bool {
+ if s == nil {
+ return false
+ }
+ return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ if s.status.Code > code {
+ return
+ }
+
+ status := Status{Code: code}
+ if code == codes.Error {
+ status.Description = description
+ }
+
+ s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+ if s == nil || len(attributes) == 0 {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ limit := s.tracer.provider.spanLimits.AttributeCountLimit
+ if limit == 0 {
+ // No attributes allowed.
+ s.addDroppedAttr(len(attributes))
+ return
+ }
+
+ // If adding these attributes could exceed the capacity of s, perform a
+ // de-duplication and truncation while adding to avoid over allocation.
+ if limit > 0 && len(s.attributes)+len(attributes) > limit {
+ s.addOverCapAttrs(limit, attributes)
+ return
+ }
+
+ // Otherwise, add without deduplication. When attributes are read they
+ // will be deduplicated, optimizing the operation.
+ s.attributes = slices.Grow(s.attributes, len(attributes))
+ for _, a := range attributes {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.addDroppedAttr(1)
+ continue
+ }
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ }
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+ global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+ s.droppedAttributes += incr
+ s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+ // In order to not allocate more capacity to s.attributes than needed,
+ // prune and truncate this addition of attributes while adding.
+
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int, len(s.attributes))
+ s.dedupeAttrsFromRecord(exists)
+
+ // Now that s.attributes is deduplicated, adding unique attributes up to
+ // the capacity of s will not over allocate s.attributes.
+
+ // max size = limit
+ maxCap := min(len(attrs)+len(s.attributes), limit)
+ if cap(s.attributes) < maxCap {
+ s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
+ }
+ for _, a := range attrs {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.addDroppedAttr(1)
+ continue
+ }
+
+ if idx, ok := exists[a.Key]; ok {
+ // Perform all updates before dropping, even when at capacity.
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes[idx] = a
+ continue
+ }
+
+ if len(s.attributes) >= limit {
+ // Do not just drop all of the remaining attributes, make sure
+ // updates are checked and performed.
+ s.addDroppedAttr(1)
+ } else {
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ exists[a.Key] = len(s.attributes) - 1
+ }
+ }
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+ if limit < 0 {
+ return attr
+ }
+ switch attr.Value.Type() {
+ case attribute.STRING:
+ v := attr.Value.AsString()
+ return attr.Key.String(truncate(limit, v))
+ case attribute.STRINGSLICE:
+ v := attr.Value.AsStringSlice()
+ for i := range v {
+ v[i] = truncate(limit, v[i])
+ }
+ return attr.Key.StringSlice(v)
+ }
+ return attr
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the
+// first limit valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
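+
+// For example, truncate(5, "résumé") keeps the first five decoded runes and
+// returns "résum", while truncate(5, "short") is returned unchanged because
+// the input is already within the byte limit.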
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanEndOptions currently supported are [trace.WithTimestamp] and
+// [trace.WithStackTrace].
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+ // Do not start by checking if the span is being recorded which requires
+ // acquiring a lock. Make a minimal check that the span is not nil.
+ if s == nil {
+ return
+ }
+
+ // Store the end time as soon as possible to avoid artificially increasing
+ // the span's duration in case some operation below takes a while.
+ et := monotonicEndTime(s.startTime)
+
+ // Lock the span now that we have an end time and see if we need to do any more processing.
+ s.mu.Lock()
+ if !s.isRecording() {
+ s.mu.Unlock()
+ return
+ }
+
+ config := trace.NewSpanEndConfig(options...)
+ if recovered := recover(); recovered != nil {
+ // Record but don't stop the panic.
+ defer panic(recovered)
+ opts := []trace.EventOption{
+ trace.WithAttributes(
+ semconv.ExceptionType(typeStr(recovered)),
+ semconv.ExceptionMessage(fmt.Sprint(recovered)),
+ ),
+ }
+
+ if config.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+ }
+
+ if s.executionTracerTaskEnd != nil {
+ s.mu.Unlock()
+ s.executionTracerTaskEnd()
+ s.mu.Lock()
+ }
+
+ // Setting endTime to non-zero marks the span as ended and not recording.
+ if config.Timestamp().IsZero() {
+ s.endTime = et
+ } else {
+ s.endTime = config.Timestamp()
+ }
+ s.mu.Unlock()
+
+ sps := s.tracer.provider.getSpanProcessors()
+ if len(sps) == 0 {
+ return
+ }
+ snap := s.snapshot()
+ for _, sp := range sps {
+ sp.sp.OnEnd(snap)
+ }
+}
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+ return start.Add(time.Since(start))
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this
+// method does not change the Span status. If this span is not being recorded
+// or err is nil, then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ ))
+
+ c := trace.NewEventConfig(opts...)
+ if c.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+}
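+
+// Recording an error and marking the span as failed are separate operations.
+// A minimal usage sketch:
+//
+//	if err != nil {
+//		span.RecordError(err, trace.WithStackTrace(true))
+//		span.SetStatus(codes.Error, err.Error())
+//	}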
+
+func typeStr(i interface{}) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+ stackTrace := make([]byte, 2048)
+ n := runtime.Stack(stackTrace, false)
+
+ return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.addEvent(name, o...)
+}
+
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+ c := trace.NewEventConfig(o...)
+ e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ e.DroppedAttributeCount = len(e.Attributes)
+ e.Attributes = nil
+ } else if limit > 0 && len(e.Attributes) > limit {
+ // Drop over capacity.
+ e.DroppedAttributeCount = len(e.Attributes) - limit
+ e.Attributes = e.Attributes[:limit]
+ }
+
+ s.events.add(e)
+}
+
+// SetName sets the name of this span. If this span is not being recorded, then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.dedupeAttrs()
+ return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int, len(s.attributes))
+ s.dedupeAttrsFromRecord(exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
+ // Use the fact that slices share the same backing array.
+ unique := s.attributes[:0]
+ for _, a := range s.attributes {
+ if idx, ok := record[a.Key]; ok {
+ unique[idx] = a
+ } else {
+ unique = append(unique, a)
+ record[a.Key] = len(unique) - 1
+ }
+ }
+ clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
+ s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.links.queue) == 0 {
+ return []Link{}
+ }
+ return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.events.queue) == 0 {
+ return []Event{}
+ }
+ return s.events.copy()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) AddLink(link trace.Link) {
+ if s == nil {
+ return
+ }
+ if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+ link.SpanContext.TraceState().Len() == 0 {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ l.DroppedAttributeCount = len(l.Attributes)
+ l.Attributes = nil
+ } else if limit > 0 && len(l.Attributes) > limit {
+ l.DroppedAttributeCount = len(l.Attributes) - limit
+ l.Attributes = l.Attributes[:limit]
+ }
+
+ s.links.add(l)
+}
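+
+// To make the arithmetic above concrete (illustrative values): with
+// AttributePerLinkCountLimit = 2, a link carrying 5 attributes is stored
+// with its first 2 attributes and DroppedAttributeCount = 3; a limit of 0
+// stores the link with no attributes (DroppedAttributeCount = 5); a
+// negative limit keeps all 5.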
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+ return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+ var sd snapshot
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ sd.endTime = s.endTime
+ sd.instrumentationScope = s.tracer.instrumentationScope
+ sd.name = s.name
+ sd.parent = s.parent
+ sd.resource = s.tracer.provider.resource
+ sd.spanContext = s.spanContext
+ sd.spanKind = s.spanKind
+ sd.startTime = s.startTime
+ sd.status = s.status
+ sd.childSpanCount = s.childSpanCount
+
+ if len(s.attributes) > 0 {
+ s.dedupeAttrs()
+ sd.attributes = s.attributes
+ }
+ sd.droppedAttributeCount = s.droppedAttributes
+ if len(s.events.queue) > 0 {
+ sd.events = s.events.copy()
+ sd.droppedEventCount = s.events.droppedCount
+ }
+ if len(s.links.queue) > 0 {
+ sd.links = s.links.copy()
+ sd.droppedLinkCount = s.links.droppedCount
+ }
+ return &sd
+}
+
+func (s *recordingSpan) addChild() {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.childSpanCount++
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+ if !rt.IsEnabled() {
+ // Avoid additional overhead if runtime/trace is not enabled.
+ return ctx
+ }
+ nctx, task := rt.NewTask(ctx, s.name)
+
+ s.mu.Lock()
+ s.executionTracerTaskEnd = task.End
+ s.mu.Unlock()
+
+ return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+ embedded.Span
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+ sc trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+ return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+ return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+ // Code is an identifier of a Span's state classification.
+ Code codes.Code
+ // Description is a user hint about why that status was set. It is only
+ // applicable when Code is Error.
+ Description string
+}
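+
+// Illustrative only: a Status is normally set through the public span API,
+// for example
+//
+//	span.SetStatus(codes.Error, "upstream timeout")
+//
+// The Description is only retained when Code is codes.Error.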
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 000000000..6bdda3d94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ExportSpans exports a batch of spans.
+ //
+ // This function is called synchronously, so there is no concurrency
+ // safety requirement. However, due to the synchronous calling pattern,
+ // it is critical that all timeouts and cancellations contained in the
+ // passed context must be honored.
+ //
+ // Any retry logic must be contained in this function. The SDK that
+ // calls this function will not implement any retry logic. All errors
+ // returned by this function are considered unrecoverable and will be
+ // reported to a configured error Handler.
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown notifies the exporter of a pending halt to operations. The
+ // exporter is expected to perform any cleanup or synchronization it
+ // requires while honoring all timeouts and cancellations contained in
+ // the passed context.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
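+
+// noopExporter is a minimal sketch of the contract above (illustrative
+// only, not part of the upstream API): honor the passed context, keep any
+// retry logic inside ExportSpans, and return promptly from Shutdown.
+type noopExporter struct{}
+
+// ExportSpans discards the batch, reporting only a ctx cancellation.
+func (noopExporter) ExportSpans(ctx context.Context, _ []ReadOnlySpan) error {
+ return ctx.Err()
+}
+
+// Shutdown has no resources to release; it likewise only reports ctx state.
+func (noopExporter) Shutdown(ctx context.Context) error { return ctx.Err() }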
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 000000000..bec5e2097
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+ // DefaultAttributeValueLengthLimit is the default maximum allowed
+ // attribute value length, unlimited.
+ DefaultAttributeValueLengthLimit = -1
+
+ // DefaultAttributeCountLimit is the default maximum number of attributes
+ // a span can have.
+ DefaultAttributeCountLimit = 128
+
+ // DefaultEventCountLimit is the default maximum number of events a span
+ // can have.
+ DefaultEventCountLimit = 128
+
+ // DefaultLinkCountLimit is the default maximum number of links a span can
+ // have.
+ DefaultLinkCountLimit = 128
+
+ // DefaultAttributePerEventCountLimit is the default maximum number of
+ // attributes a span event can have.
+ DefaultAttributePerEventCountLimit = 128
+
+ // DefaultAttributePerLinkCountLimit is the default maximum number of
+ // attributes a span link can have.
+ DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+ // AttributeValueLengthLimit is the maximum allowed attribute value length.
+ //
+ // This limit only applies to string and string slice attribute values.
+ // Any string longer than this value will be truncated to this length.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeValueLengthLimit int
+
+ // AttributeCountLimit is the maximum allowed span attribute count. Any
+ // attribute added to a span once this limit is reached will be dropped.
+ //
+ // Setting this to zero means no attributes will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeCountLimit int
+
+ // EventCountLimit is the maximum allowed span event count. Any event
+ // added to a span once this limit is reached will still be added, but
+ // the oldest event will be dropped.
+ //
+ // Setting this to zero means no events will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ EventCountLimit int
+
+ // LinkCountLimit is the maximum allowed span link count. Any link added
+ // to a span once this limit is reached will still be added, but the
+ // oldest link will be dropped.
+ //
+ // Setting this to zero means no links will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ LinkCountLimit int
+
+ // AttributePerEventCountLimit is the maximum number of attributes allowed
+ // per span event. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for events.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerEventCountLimit int
+
+ // AttributePerLinkCountLimit is the maximum number of attributes allowed
+ // per span link. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for links.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+ return SpanLimits{
+ AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+ AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit),
+ EventCountLimit: env.SpanEventCount(DefaultEventCountLimit),
+ LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit),
+ AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+ AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+ }
+}
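+
+// Illustrative only: a provider that caps span attributes at 64 while
+// keeping the remaining defaults could be configured as
+//
+//	limits := NewSpanLimits()
+//	limits.AttributeCountLimit = 64
+//	tp := NewTracerProvider(WithRawSpanLimits(limits))
+//
+// WithRawSpanLimits applies the given limits verbatim, without consulting
+// the environment again.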
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 000000000..af7f9177f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors registered with a TracerProvider are called at the start
+// and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnStart is called when a span is started. It is called synchronously
+ // and should not block.
+ OnStart(parent context.Context, s ReadWriteSpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnEnd is called when a span is finished. It is called synchronously
+ // and hence must not block.
+ OnEnd(s ReadOnlySpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the processor should be done in this call.
+ //
+ // Calls to OnStart, OnEnd, or ForceFlush after this has been called
+ // should be ignored.
+ //
+ // All timeouts and cancellations contained in ctx must be honored; this
+ // should not block indefinitely.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush exports to the configured Exporter all ended spans that
+ // have not yet been exported. It should only be called when absolutely
+ // necessary, such as when using a FaaS provider that may suspend the
+ // process after an invocation, but before the Processor can export the
+ // completed spans.
+ ForceFlush(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+ sp SpanProcessor
+ state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+ return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
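+
+// countingProcessor is a minimal sketch of the SpanProcessor contract
+// (illustrative only, not part of the upstream API): OnStart and OnEnd do
+// O(1), non-blocking work, and Shutdown/ForceFlush honor ctx.
+type countingProcessor struct {
+ mu sync.Mutex
+ started, ended int
+}
+
+// OnStart runs synchronously when a span starts, so it must stay cheap.
+func (p *countingProcessor) OnStart(_ context.Context, _ ReadWriteSpan) {
+ p.mu.Lock()
+ p.started++
+ p.mu.Unlock()
+}
+
+// OnEnd runs synchronously when a span ends and likewise must not block.
+func (p *countingProcessor) OnEnd(_ ReadOnlySpan) {
+ p.mu.Lock()
+ p.ended++
+ p.mu.Unlock()
+}
+
+// Shutdown and ForceFlush buffer nothing; they only report ctx state.
+func (p *countingProcessor) Shutdown(ctx context.Context) error { return ctx.Err() }
+func (p *countingProcessor) ForceFlush(ctx context.Context) error { return ctx.Err() }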
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 000000000..43419d3b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+ embedded.Tracer
+
+ provider *TracerProvider
+ instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+ config := trace.NewSpanStartConfig(options...)
+
+ if ctx == nil {
+ // Prevent trace.ContextWithSpan from panicking.
+ ctx = context.Background()
+ }
+
+ // For local spans created by this SDK, track child span count.
+ if p := trace.SpanFromContext(ctx); p != nil {
+ if sdkSpan, ok := p.(*recordingSpan); ok {
+ sdkSpan.addChild()
+ }
+ }
+
+ s := tr.newSpan(ctx, name, &config)
+ if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+ sps := tr.provider.getSpanProcessors()
+ for _, sp := range sps {
+ sp.sp.OnStart(ctx, rw)
+ }
+ }
+ if rtt, ok := s.(runtimeTracer); ok {
+ ctx = rtt.runtimeTrace(ctx)
+ }
+
+ return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+ // runtimeTrace starts a "runtime/trace".Task for the span and
+ // returns a context containing the task.
+ runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+ // If told explicitly to make this a new root, use a zero-value SpanContext
+ // as the parent, which contains an invalid trace ID and is not remote.
+ var psc trace.SpanContext
+ if config.NewRoot() {
+ ctx = trace.ContextWithSpanContext(ctx, psc)
+ } else {
+ psc = trace.SpanContextFromContext(ctx)
+ }
+
+ // If there is a valid parent trace ID, use it to ensure the continuity of
+ // the trace. Always generate a new span ID so other components can rely
+ // on a unique span ID, even if the Span is non-recording.
+ var tid trace.TraceID
+ var sid trace.SpanID
+ if !psc.TraceID().IsValid() {
+ tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+ } else {
+ tid = psc.TraceID()
+ sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+ }
+
+ samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+ ParentContext: ctx,
+ TraceID: tid,
+ Name: name,
+ Kind: config.SpanKind(),
+ Attributes: config.Attributes(),
+ Links: config.Links(),
+ })
+
+ scc := trace.SpanContextConfig{
+ TraceID: tid,
+ SpanID: sid,
+ TraceState: samplingResult.Tracestate,
+ }
+ if isSampled(samplingResult) {
+ scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+ } else {
+ scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+ }
+ sc := trace.NewSpanContext(scc)
+
+ if !isRecording(samplingResult) {
+ return tr.newNonRecordingSpan(sc)
+ }
+ return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+ startTime := config.Timestamp()
+ if startTime.IsZero() {
+ startTime = time.Now()
+ }
+
+ s := &recordingSpan{
+ // Do not pre-allocate the attributes slice here! Doing so will
+ // allocate memory that is likely never going to be used, or if used,
+ // will be over-sized. The default Go compiler has been tested to
+ // dynamically allocate needed space very well. Benchmarking has shown
+ // it to be more performant than what we can predetermine here,
+ // especially for the common use case of few to no added
+ // attributes.
+
+ parent: psc,
+ spanContext: sc,
+ spanKind: trace.ValidateSpanKind(config.SpanKind()),
+ name: name,
+ startTime: startTime,
+ events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+ links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+ tracer: tr,
+ }
+
+ for _, l := range config.Links() {
+ s.AddLink(l)
+ }
+
+ s.SetAttributes(sr.Attributes...)
+ s.SetAttributes(config.Attributes()...)
+
+ return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+ return nonRecordingSpan{tracer: tr, sc: sc}
+}
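+
+// Illustrative only (assumes tp is a *TracerProvider from this package):
+// how the parent/new-root logic above plays out through the public API.
+//
+//	ctx, parent := tp.Tracer("example").Start(context.Background(), "parent")
+//	_, child := tp.Tracer("example").Start(ctx, "child") // same trace ID as parent
+//	_, root := tp.Tracer("example").Start(ctx, "detached", trace.WithNewRoot()) // fresh trace ID
+//	root.End()
+//	child.End()
+//	parent.End()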
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 000000000..b84dd2c5e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+ return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 000000000..6b4038510
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+ return "1.34.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 000000000..82e1f46b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 000000000..6685c392b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
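+
+// Illustrative only: on a server, these helpers are typically attached to
+// the request span (span and r, an *http.Request, are assumed in scope):
+//
+//	span.SetAttributes(
+//		semconv.HTTPMethod(r.Method),
+//		semconv.HTTPStatusCode(http.StatusOK),
+//	)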
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have the same `event.name`,
+ // yet be unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If defined for the address
+ // family and if different than `net.host.port` and if `net.sock.host.addr`
+ // is set. In other cases, it is still recommended to set this.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the version of
+// the application layer protocol used. See note below.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If the broker does not have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
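+
+// Illustrative only: a producer span would typically combine a destination
+// attribute with the per-message attributes defined earlier in this file
+// (span and msgID are assumed to be in scope):
+//
+//	span.SetAttributes(
+//		semconv.MessagingDestinationName("MyQueue"),
+//		semconv.MessagingMessageID(msgID),
+//	)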
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If the broker does not have such a
+ // notion, the source name SHOULD uniquely identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
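+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): a consumer could combine several of the helpers above when
+// starting a processing span. tracer, ctx, and the trace package import
+// are assumed.
+//
+//	ctx, span := tracer.Start(ctx, "orders process",
+//		trace.WithAttributes(
+//			MessagingKafkaConsumerGroup("my-group"),
+//			MessagingKafkaSourcePartition(2),
+//			MessagingKafkaMessageOffset(42),
+//		),
+//	)
+//	defer span.End()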
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
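+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): attributes for a FIFO message can be assembled as a plain
+// slice before being attached to a span or event. All values are examples.
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingRocketmqNamespace("myNamespace"),
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqMessageTypeFifo,
+//		MessagingRocketmqMessageGroup("myMessageGroup"),
+//	}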
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
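+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): HTTP server instrumentation can take the value straight
+// from the request. Here r is an assumed *http.Request and span an assumed
+// trace.Span.
+//
+//	span.SetAttributes(UserAgentOriginal(r.UserAgent()))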
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 000000000..0d1f55a8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 000000000..637763932
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
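+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): a flag evaluation is typically modeled as a span event
+// carrying these attributes. span and the trace package import are assumed.
+//
+//	span.AddEvent("feature_flag",
+//		trace.WithAttributes(
+//			FeatureFlagKey("logo-color"),
+//			FeatureFlagProviderName("Flag Manager"),
+//			FeatureFlagVariant("red"),
+//		),
+//	)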
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`, one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be
+// calculated as two different counters starting from `1`, one for sent
+// messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
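+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): RPC instrumentation commonly emits one "message" event per
+// sent or received message, with a per-direction counter. span, sentCount,
+// and payload are assumed.
+//
+//	sentCount++ // starts from 1, one counter per direction
+//	span.AddEvent("message",
+//		trace.WithAttributes(
+//			MessageTypeSent,
+//			MessageID(sentCount),
+//			MessageUncompressedSize(len(payload)),
+//		),
+//	)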
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
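+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): when an error is still unhandled as the span ends, the
+// attribute can accompany the recorded exception event. span, err, and the
+// trace package import are assumed.
+//
+//	span.RecordException(err, trace.WithAttributes(ExceptionEscaped(true)))
+//	span.End()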
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 000000000..f40c97825
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 000000000..9c1840631
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 000000000..3d44dae27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2060 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
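+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): browser attributes are resource-level, so they would
+// normally be attached when constructing an SDK resource. resource is the
+// assumed go.opentelemetry.io/otel/sdk/resource package and SchemaURL this
+// package's schema URL constant.
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		BrowserPlatform("macOS"),
+//		BrowserMobile(false),
+//		BrowserLanguage("en-US"),
+//	)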
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
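+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): a process on AWS might describe its environment by mixing
+// the enum values and helpers above. All values are examples.
+//
+//	attrs := []attribute.KeyValue{
+//		CloudProviderAWS,
+//		CloudPlatformAWSEC2,
+//		CloudRegion("us-east-1"),
+//		CloudAvailabilityZone("us-east-1c"),
+//	}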
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
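+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): the variadic helpers accept one or more names or ARNs.
+// All values are examples.
+//
+//	attrs := []attribute.KeyValue{
+//		AWSLogGroupNames("/aws/lambda/my-function"),
+//		AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}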
+
+// Heroku dyno metadata
+const (
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+)
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
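+
+// Illustrative usage (a hedged sketch, not part of the generated
+// conventions): container identity is usually discovered by a resource
+// detector; a manual equivalent would look like this. All values are
+// examples.
+//
+//	attrs := []attribute.KeyValue{
+//		ContainerName("opentelemetry-autoconf"),
+//		ContainerID("a3bf90e006b2"),
+//		ContainerRuntime("containerd"),
+//		ContainerImageName("gcr.io/opentelemetry/operator"),
+//		ContainerImageTag("0.1"),
+//	}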
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations of the
+	// same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function, converted to bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 134217728
+	// Note: It's recommended to set this attribute since, for example, too
+	// little memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576 to convert megabytes to bytes).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
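+
+// An editor's sketch (not part of the generated conventions) of the
+// conversion described in the note on FaaSMaxMemoryKey: reading the
+// AWS_LAMBDA_FUNCTION_MEMORY_SIZE environment variable (megabytes) and
+// producing the byte value FaaSMaxMemory expects. Standard library only; the
+// helper name is illustrative.
+//
+//	func lambdaMaxMemory() (attribute.KeyValue, bool) {
+//		mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
+//		if err != nil {
+//			return attribute.KeyValue{}, false
+//		}
+//		// 1 MB as reported by Lambda is 1,048,576 bytes.
+//		return FaaSMaxMemory(mb * 1048576), true
+//	}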
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
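+
+// A minimal sketch (editor's addition, standard library only) of populating
+// host attributes; the runtime.GOARCH mapping covers only a few common cases
+// and is an assumption, not something the conventions define.
+//
+//	func hostAttributes() []attribute.KeyValue {
+//		var attrs []attribute.KeyValue
+//		if name, err := os.Hostname(); err == nil {
+//			attrs = append(attrs, HostName(name))
+//		}
+//		switch runtime.GOARCH {
+//		case "amd64":
+//			attrs = append(attrs, HostArchAMD64)
+//		case "arm64":
+//			attrs = append(attrs, HostArchARM64)
+//		case "arm":
+//			attrs = append(attrs, HostArchARM32)
+//		case "386":
+//			attrs = append(attrs, HostArchX86)
+//		}
+//		return attrs
+//	}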
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
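+
+// A hedged sketch (editor's addition): Kubernetes can expose pod metadata to
+// the container through the downward API; the POD_NAMESPACE and POD_NAME
+// environment variable names below are illustrative and must match the
+// fieldRef entries configured in your pod spec.
+//
+//	func k8sPodAttributes() []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
+//			K8SPodName(os.Getenv("POD_NAME")),
+//		}
+//	}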
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, as reported by, for example, the
+	// `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for
+// example, the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
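+
+// Editor's sketch: deriving os.type from runtime.GOOS. The mapping below is
+// an assumption covering only a few common values; other GOOS strings need
+// their own cases.
+//
+//	func osType() attribute.KeyValue {
+//		switch runtime.GOOS {
+//		case "windows":
+//			return OSTypeWindows
+//		case "darwin":
+//			return OSTypeDarwin
+//		case "freebsd":
+//			return OSTypeFreeBSD
+//		default:
+//			return OSTypeLinux // simplification; map remaining GOOS values as needed
+//		}
+//	}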
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
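+
+// A minimal sketch (editor's addition, standard library only) showing how the
+// process attributes above can be filled in from the os, os/user, and
+// path/filepath packages:
+//
+//	func processAttributes() []attribute.KeyValue {
+//		attrs := []attribute.KeyValue{
+//			ProcessPID(os.Getpid()),
+//			ProcessParentPID(os.Getppid()),
+//			ProcessCommandArgs(os.Args...),
+//		}
+//		if exe, err := os.Executable(); err == nil {
+//			attrs = append(attrs,
+//				ProcessExecutablePath(exe),
+//				ProcessExecutableName(filepath.Base(exe)))
+//		}
+//		if u, err := user.Current(); err == nil {
+//			attrs = append(attrs, ProcessOwner(u.Username))
+//		}
+//		return attrs
+//	}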
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
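+
+// Editor's sketch of the fallback rule in the note above: with no configured
+// service name, fall back to `unknown_service:` plus the executable name, and
+// to plain `unknown_service` when that is unavailable. Standard library only;
+// the helper name is illustrative.
+//
+//	func serviceNameOrFallback(configured string) attribute.KeyValue {
+//		if configured != "" {
+//			return ServiceName(configured)
+//		}
+//		if exe, err := os.Executable(); err == nil {
+//			return ServiceName("unknown_service:" + filepath.Base(exe))
+//		}
+//		return ServiceName("unknown_service")
+//	}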
+
+// A service instance.
+const (
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). A zero-length namespace string is assumed equal to an
+	// unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
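+
+// Illustrative only (editor's addition): a random RFC 4122 UUID for
+// service.instance.id, as the note above recommends when the service has no
+// inherent unique ID. The github.com/google/uuid module is an assumed
+// dependency, not something this package requires.
+//
+//	attrs := []attribute.KeyValue{
+//		ServiceNamespace("Shop"),
+//		ServiceInstanceID(uuid.NewString()), // random Version 4 UUID
+//		ServiceVersion("2.0.0"),
+//	}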
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
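+
+// Editor's note: these scope attributes normally come from the
+// instrumentation scope a tracer is created with, e.g. (assuming the
+// go.opentelemetry.io/otel and go.opentelemetry.io/otel/trace packages):
+//
+//	tracer := otel.Tracer("io.opentelemetry.contrib.mongodb",
+//		trace.WithInstrumentationVersion("1.0.0"))
+//
+// Non-OTLP exporters may then render that scope as otel.scope.name and
+// otel.scope.version on each span.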
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 000000000..95d0210e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL of the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 000000000..90b1b0452
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2599 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
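+
+// A hedged sketch (editor's addition): SDK spans usually gain these
+// attributes via span.RecordError, which emits an "exception" event; setting
+// them by hand on an event looks like (assuming the
+// go.opentelemetry.io/otel/trace package):
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("java.net.ConnectException"),
+//		ExceptionMessage("Connection refused"),
+//	))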
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+ // The id MAY be an [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
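+
+// Illustrative only (editor's addition): the note above suggests a ULID but
+// allows other identifiers such as UUIDs; with the assumed
+// github.com/google/uuid dependency:
+//
+//	uid := LogRecordUID(uuid.NewString())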
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the response to
+	// `/runtime/invocation/next`, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the response to
+// `/runtime/invocation/next`, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
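+
+// A sketch (editor's addition) of reading the invoked ARN inside a handler
+// with the github.com/aws/aws-lambda-go/lambdacontext package (an assumed
+// dependency):
+//
+//	if lc, ok := lambdacontext.FromContext(ctx); ok {
+//		span.SetAttributes(AWSLambdaInvokedARN(lc.InvokedFunctionArn))
+//	}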
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
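+// A minimal usage sketch (illustrative only): when processing a CloudEvent,
+// the required identifiers could be attached to the active span. The `span`
+// value (a go.opentelemetry.io/otel/trace.Span) is assumed, and the literals
+// are taken from the examples above.
+//
+//  span.SetAttributes(
+//      CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//      CloudeventsEventSource("https://github.com/cloudevents"),
+//      CloudeventsEventType("com.github.pull_request.opened"),
+//  )
+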
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
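+// The two enum values above are complete attribute.KeyValue pairs, so an
+// OpenTracing shim can set them directly; a minimal sketch (the `span`
+// variable, a go.opentelemetry.io/otel/trace.Span, is an assumption):
+//
+//  span.SetAttributes(OpentracingRefTypeChildOf)
+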
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database,
+ // this should be set to the target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
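+// A minimal usage sketch (illustrative only) combining the attributes above
+// on a database client span; `span` (a go.opentelemetry.io/otel/trace.Span)
+// is assumed, and the values echo the examples in the comments:
+//
+//  span.SetAttributes(
+//      DBSystemPostgreSQL,
+//      DBName("customers"),
+//      DBStatement("SELECT * FROM wuser_table"),
+//      DBOperation("SELECT"),
+//  )
+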
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the `db.sql.table` attribute but references Cassandra
+ // rather than SQL. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
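+// A sketch of annotating a Cassandra query span with the call-level
+// attributes above (`span` is an assumed trace.Span; the values are
+// illustrative):
+//
+//  span.SetAttributes(
+//      DBSystemCassandra,
+//      DBCassandraTable("mykeyspace.mytable"),
+//      DBCassandraConsistencyLevelLocalQuorum,
+//      DBCassandraPageSize(5000),
+//  )
+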
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct`, or pick
+ // `gateway` as the default)
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
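+// A sketch of a Cosmos DB request span using the attributes above (`span` is
+// an assumed trace.Span; the values are illustrative):
+//
+//  span.SetAttributes(
+//      DBSystemCosmosDB,
+//      DBCosmosDBOperationTypeQuery,
+//      DBCosmosDBContainer("orders"),
+//      DBCosmosDBStatusCode(200),
+//      DBCosmosDBRequestCharge(46.18),
+//  )
+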
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
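+// These attributes are normally emitted by exporters rather than set by
+// instrumentation (instrumentation should call span.SetStatus instead). A
+// sketch of how an exporter might materialize them (the `attrs` slice is
+// illustrative):
+//
+//  attrs := []attribute.KeyValue{
+//      OTelStatusCodeError,
+//      OTelStatusDescription("resource not found"),
+//  }
+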
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that corresponding incoming would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
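+// A sketch of annotating an incoming FaaS invocation span (`span` is an
+// assumed trace.Span; the trigger and ID values are illustrative):
+//
+//  span.SetAttributes(
+//      FaaSTriggerHTTP,
+//      FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//  )
+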
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
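+// A sketch for a timer-triggered invocation (`span` is an assumed
+// trace.Span; the values echo the examples above):
+//
+//  span.SetAttributes(
+//      FaaSTriggerTimer,
+//      FaaSTime("2020-01-23T13:47:06Z"),
+//      FaaSCron("0/5 * * * ? *"),
+//  )
+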
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
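+// A sketch of an outgoing (client) span that invokes another function
+// (`span` is an assumed trace.Span; the values are illustrative):
+//
+//  span.SetAttributes(
+//      FaaSInvokedName("my-function"),
+//      FaaSInvokedProviderAWS,
+//      FaaSInvokedRegion("eu-central-1"),
+//  )
+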
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
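+// A sketch of attaching end-user context after authentication (`span` is an
+// assumed trace.Span; the values are illustrative and potentially sensitive,
+// so set them only where policy allows):
+//
+//  span.SetAttributes(
+//      EnduserID("username"),
+//      EnduserRole("admin"),
+//      EnduserScope("read:message, write:files"),
+//  )
+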
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
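+// A sketch of attaching source-location attributes (`span` is an assumed
+// trace.Span; the location values are illustrative):
+//
+//  span.SetAttributes(
+//      CodeFunction("serveRequest"),
+//      CodeNamespace("com.example.MyHTTPService"),
+//      CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//      CodeLineNumber(42),
+//  )
+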
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in the form
+ // of `https://username:password@www.example.com/`. In such a case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
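+// A sketch for an HTTP client span (`span` is an assumed trace.Span; note the
+// credential-stripping rule from the `http.url` note above):
+//
+//  span.SetAttributes(
+//      HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+//      HTTPResendCount(3), // only if the request was retried
+//  )
+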
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
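+
+// Example (a sketch; r is an assumed *http.Request and span an assumed
+// trace.Span): take the first hop of X-Forwarded-For when the header can be
+// trusted.
+//
+//	ip, _, _ := strings.Cut(r.Header.Get("X-Forwarded-For"), ",")
+//	if ip = strings.TrimSpace(ip); ip != "" {
+//		span.SetAttributes(HTTPClientIP(ip))
+//	}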
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
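+
+// Example (a sketch; resp is an assumed *http.Response from an AWS call and
+// span an assumed trace.Span):
+//
+//	if id := resp.Header.Get("x-amz-request-id"); id != "" {
+//		span.SetAttributes(AWSRequestID(id))
+//	}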
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
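+
+// Example (illustrative only; span is an assumed trace.Span wrapping a
+// DynamoDB Query call): the helpers above are typically combined on one span.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBProjection("Title, Price"),
+//		AWSDynamoDBLimit(10),
+//	)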
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
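+
+// Example (a sketch; span, segment, total, count, and scanned are assumed
+// caller state): annotate one segment of a parallel Scan with its request
+// and response parameters.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(segment),
+//		AWSDynamoDBTotalSegments(total),
+//		AWSDynamoDBCount(count),
+//		AWSDynamoDBScannedCount(scanned),
+//	)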
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. those that require the bucket name as a
+ // mandatory parameter.
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. those that require the object key as a mandatory
+ // parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
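+
+// Example (a sketch; span, uploadID, and part are assumed caller state):
+// annotate an upload-part call in a multipart upload.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID(uploadID),
+//		AWSS3PartNumber(part),
+//	)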
+
+// Semantic conventions to apply when instrumenting a GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
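+
+// Example (illustrative; span is an assumed trace.Span): a query operation
+// recorded with the attributes above.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)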
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
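+
+// Example (a sketch; span and n are assumed caller state): a batch receive
+// of n messages. Per the note on MessagingBatchMessageCountKey, omit the
+// count entirely for single-message operations.
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationReceive,
+//		MessagingBatchMessageCount(n),
+//	)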
+
+// Semantic convention for a consumer of messages received from a messaging
+// system.
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
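+
+// Example (illustrative; span is an assumed trace.Span): for a span named
+// "myservice.EchoService/exampleMethod", the pieces decompose as follows.
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	)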
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
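+
+// Example (a sketch; st is an assumed *status.Status obtained from
+// google.golang.org/grpc/status.FromError): the enum values above carry the
+// numeric gRPC code, so a code can also be recorded directly.
+//
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(st.Code())))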
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of a request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value
+ // is expected to be cast to a string for simplicity. Use an empty string
+ // for a `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of a request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string for a
+// `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
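+
+// Example (illustrative; span is an assumed trace.Span): an error response
+// under JSON-RPC 2.0.
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersion("2.0"),
+//		RPCJsonrpcRequestID("10"),
+//		RPCJsonrpcErrorCode(-32700),
+//		RPCJsonrpcErrorMessage("Parse error"),
+//	)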
+
+// Tech-specific attributes for Connect RPC.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If response is not successful
+ // and if error code available.)
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 000000000..7e2910025
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,661 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If a [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider, it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+ cfg := NewTracerConfig(opts...)
+ return autoTracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
+
+type autoTracer struct {
+ embedded.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ Tracer = autoTracer{}
+
+func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
+ var psc SpanContext
+ sampled := true
+ span := new(autoSpan)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &span.spanContext)
+
+ span.sampled.Store(sampled)
+
+ ctx = ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *autoTracer) start(
+ ctx context.Context,
+ spanPtr *autoSpan,
+ psc *SpanContext,
+ sampled *bool,
+ sc *SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {}
+
+func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ n := int64(len(links))
+ if n > 0 {
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ } else {
+ if limit > 0 {
+ n := int64(max(len(links)-limit, 0))
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind SpanKind) telemetry.SpanKind {
+ switch kind {
+ case SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case SpanKindServer:
+ return telemetry.SpanKindServer
+ case SpanKindClient:
+ return telemetry.SpanKindClient
+ case SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
+
+type autoSpan struct {
+ embedded.Span
+
+ spanContext SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *autoSpan) SpanContext() SpanContext {
+ if s == nil {
+ return SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *autoSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *autoSpan) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
+ if limit == 0 {
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return nil, out
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
+}
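+
+// For illustration (kvs is an assumed []attribute.KeyValue of length 3 with
+// non-empty values): a limit of 0 drops every attribute, a negative limit
+// keeps them all, and otherwise the first limit attributes are kept.
+//
+//	attrs, dropped := convCappedAttrs(2, kvs) // len(attrs) == 2, dropped == 1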
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// limit characters. Truncation is applied by returning the first limit valid
+// characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than limit bytes, it is returned unchanged,
+// and no invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
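+
+// For illustration, assuming the inputs are valid UTF-8:
+//
+//	truncate(5, "abcdefgh")  // "abcde"
+//	truncate(-1, "abcdefgh") // unchanged
+//
+// Bytes with invalid encodings are only dropped when s exceeds the limit.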
+
+func (s *autoSpan) End(opts ...SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *autoSpan) end(opts []SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*autoSpan) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *autoSpan) RecordError(err error, opts ...EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
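+
+// Illustrative only, not part of the upstream source: when the event limit
+// is reached, the oldest event is dropped first, so the slice behaves like
+// a FIFO window. Assuming maxSpan.Events == 2:
+//
+//	s.addEvent("a", t1, nil) // Events: [a]
+//	s.addEvent("b", t2, nil) // Events: [a b]
+//	s.addEvent("c", t3, nil) // Events: [b c], DroppedEvents: 1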
+
+func (s *autoSpan) AddLink(link Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *autoSpan) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
+
+// maxSpan are the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first environment
+// variable from keys that is set to a valid integer. Keys holding empty or
+// non-integer values are skipped. The defaultVal is returned if no key
+// yields a valid integer.
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ // Ignore invalid environment variable.
+ }
+
+ return defaultVal
+}
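+
+// Illustrative only, not part of the upstream source: with
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT="64" set in the environment, the span
+// attribute limit resolves to 64; if no listed key holds a valid integer,
+// the default is used:
+//
+//	firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT") // 64
+//	firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT")                                   // 128 when unset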
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 000000000..f663547b4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
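+
+// Illustrative only, not part of the upstream source: a sketch of how the
+// constructors above compose into nested attributes:
+//
+//	attr := Map("client",
+//		String("host", "example.com"),
+//		Int("port", 443),
+//		Slice("retries", Int64Value(1), Int64Value(2)),
+//	)
+//	_ = attr.Equal(attr) // true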
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 000000000..5debe90bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides a lightweight representation of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 000000000..7b1ae3c4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
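+
+// Illustrative only, not part of the upstream source: IDs round-trip
+// through OTLP JSON as quoted hex strings, and the zero ID encodes as "":
+//
+//	id := TraceID{0x01}
+//	b, _ := id.MarshalJSON() // "01000000000000000000000000000000"
+//	var got TraceID
+//	_ = got.UnmarshalJSON(b) // got == id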
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 000000000..f5e3a8cec
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
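+
+// Illustrative only, not part of the upstream source: OTLP JSON encodes
+// 64-bit integers as strings, while some protobuf JSON emitters use bare
+// numbers, so both forms decode to the same value:
+//
+//	var a, b protoInt64
+//	_ = json.Unmarshal([]byte(`"42"`), &a) // a.Int64() == 42
+//	_ = json.Unmarshal([]byte(`42`), &b)   // b.Int64() == 42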
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 000000000..1798a702d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 000000000..c2b4c635b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
new file mode 100644
index 000000000..3c5e1cdb1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
@@ -0,0 +1,460 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask indicates the span or link is remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
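+
+// Illustrative only, not part of the upstream source: the masks are applied
+// as bit-fields to a span's uint32 Flags value, e.g. for a remote parent:
+//
+//	flags := uint32(SpanFlagsContextHasIsRemoteMask | SpanFlagsContextIsRemoteMask)
+//	w3c := flags & uint32(SpanFlagsTraceFlagsMask)              // 8-bit W3C trace flags
+//	known := flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0 // true
+//	remote := flags&uint32(SpanFlagsContextIsRemoteMask) != 0   // true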
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ SpanKindClient SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // Indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// A SpanEvent is a time-stamped annotation of the span, consisting of a
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t), // nolint: gosec // >0 checked above
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A SpanLink is a pointer from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
new file mode 100644
index 000000000..1d013a8fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// StatusCode is the status of a Span. For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // The default status.
+ StatusCodeUnset StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // The Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return ""
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
new file mode 100644
index 000000000..b03940708
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically, when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 000000000..7251492da
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either ValueKindBool, ValueKindInt64, ValueKindFloat64, a
+ // stringptr, bytesptr, sliceptr, or mapptr. If ValueKindBool,
+ // ValueKindInt64, or ValueKindFloat64, the value of the Value is in num
+ // as described above. Otherwise, it contains the value wrapped in the
+ // appropriate pointer type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for ValueKindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for ValueKindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for ValueKindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for ValueKindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{
+ num: uint64(v), // nolint: gosec // Store raw bytes.
+ any: ValueKindInt64,
+ }
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
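+
+// Illustrative only, not part of the upstream source: a Value carries its
+// kind, and the exported As* accessors return the zero value on a kind
+// mismatch rather than panicking:
+//
+//	v := StringValue("abc")
+//	v.Kind()     // ValueKindString
+//	v.AsString() // "abc"
+//	v.AsInt64()  // 0 (kind mismatch)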
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the
+// Value is not ValueKindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of
+// ValueKindInt64, this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of
+// ValueKindBool, this will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// ValueKindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the
+// Value is not ValueKindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the
+// Value is not ValueKindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not ValueKindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the ValueKind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
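+
+// Illustrative only, not part of the upstream source: map values are
+// compared in sorted key order, so attribute ordering does not affect
+// equality:
+//
+//	a := MapValue(String("x", "1"), String("y", "2"))
+//	b := MapValue(String("y", "2"), String("x", "1"))
+//	a.Equal(b) // true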
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return fmt.Sprint(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return ""
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal is to have developers find this first
+ // if a Kind is not handled. It is preferable to have users open an
+ // issue asking why their attributes have an "<unhandled: >" prefix
+ // than to say that their code is panicking.
+ return fmt.Sprintf("<unhandled: %v>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
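+
+// Illustrative only, not part of the upstream source: the output follows
+// the OTLP AnyValue JSON encoding, with 64-bit integers rendered as
+// strings. Note the pointer, since MarshalJSON has a pointer receiver:
+//
+//	v := Int64Value(42)
+//	b, _ := json.Marshal(&v) // {"intValue":"42"}
+//	s := StringValue("x")
+//	b, _ = json.Marshal(&s)  // {"stringValue":"x"}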
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index ca20e9997..c8b1ae5d6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -82,4 +82,22 @@ func (noopSpan) AddLink(Link) {}
func (noopSpan) SetName(string) {}
// TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
+func (s noopSpan) TracerProvider() TracerProvider {
+ return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+ if *autoEnabled {
+ return newAutoTracerProvider()
+ }
+ return noopTracerProvider{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index fb7d12673..d5fa71f67 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.33.0"
+ return "1.35.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 9f878cd1f..2b4cb4b41 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.33.0
+ version: v1.35.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -23,11 +23,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.55.0
+ version: v0.57.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.9.0
+ version: v0.11.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/sdk/log
@@ -40,3 +40,4 @@ module-sets:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
+ - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
new file mode 100644
index 000000000..c1af04e84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
@@ -0,0 +1,367 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+ v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain one
+ // element. Intermediary nodes (such as the OpenTelemetry Collector) that receive
+ // data from multiple origins typically batch the data before forwarding it
+ // further, and in that case this array will contain multiple elements.
+ ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+ *x = ExportTraceServiceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+ if x != nil {
+ return x.ResourceSpans
+ }
+ return nil
+}
+
+type ExportTraceServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The details of a partially successful export request.
+ //
+ // If the request is only partially accepted
+ // (i.e. when the server accepts only parts of the data and rejects the rest)
+ // the server MUST initialize the `partial_success` field and MUST
+ // set the `rejected_<signal>` with the number of items it rejected.
+ //
+ // Servers MAY also make use of the `partial_success` field to convey
+ // warnings/suggestions to senders even when the request was fully accepted.
+ // In such cases, the `rejected_<signal>` MUST have a value of `0` and
+ // the `error_message` MUST be non-empty.
+ //
+ // A `partial_success` message with an empty value (rejected_<signal> = 0 and
+ // `error_message` = "") is equivalent to it not being set/present. Senders
+ // SHOULD interpret it the same way as in the full success case.
+ PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+ *x = ExportTraceServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+ if x != nil {
+ return x.PartialSuccess
+ }
+ return nil
+}
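+
+// For illustration, a server that dropped part of a batch could report it
+// roughly as below; the count and message are invented examples:
+//
+//	return &ExportTraceServiceResponse{
+//		PartialSuccess: &ExportTracePartialSuccess{
+//			RejectedSpans: 3,
+//			ErrorMessage:  "spans without trace IDs were dropped",
+//		},
+//	}, nil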
+
+type ExportTracePartialSuccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of rejected spans.
+ //
+ // A `rejected_<signal>` field holding a `0` value indicates that the
+ // request was fully accepted.
+ RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+ // A developer-facing human-readable message in English. It should be used
+ // either to explain why the server rejected parts of the data during a partial
+ // success or to convey warnings/suggestions during a full success. The message
+ // should offer guidance on how users can address such issues.
+ //
+ // error_message is an optional field. An error_message with an empty value
+ // is equivalent to it not being set.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+ *x = ExportTracePartialSuccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 {
+ if x != nil {
+ return x.RejectedSpans
+ }
+ return 0
+}
+
+func (x *ExportTracePartialSuccess) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{
+ 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f,
+ 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+ 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22,
+ 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a,
+ 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78,
+ 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+ 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23,
+ 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12,
+ 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f,
+ 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02,
+ 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{
+ (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+ (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+ (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+ (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans
+}
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{
+ 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+ 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+ 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+ 3, // [3:4] is the sub-list for method output_type
+ 2, // [2:3] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() }
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() {
+ if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTracePartialSuccess); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 000000000..bb1bd261e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportTraceServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportTraceServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.Export(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
+// UnaryRPC :call TraceServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is the same as RegisterTraceServiceHandler
+// but automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterTraceServiceHandler(ctx, mux, conn)
+}
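+
+// A minimal wiring sketch; the gRPC address, HTTP port, and the log,
+// net/http, and credentials/insecure imports are assumptions of this
+// example, not part of the generated API:
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8080", mux)) // POST /v1/traces now proxies to Export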
+
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, ""))
+)
+
+var (
+ forward_TraceService_Export_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
new file mode 100644
index 000000000..dd1b73f1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
+ out := new(ExportTraceServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
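+
+// A minimal caller sketch; conn (*grpc.ClientConn) and spans
+// ([]*v1.ResourceSpans) are assumed to be built by the caller:
+//
+//	client := NewTraceServiceClient(conn)
+//	resp, err := client.Export(ctx, &ExportTraceServiceRequest{ResourceSpans: spans})
+//	if err != nil {
+//		return err
+//	}
+//	if ps := resp.GetPartialSuccess(); ps != nil && ps.GetRejectedSpans() > 0 {
+//		// the server accepted only part of the batch
+//	}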
+
+// TraceServiceServer is the server API for TraceService service.
+// All implementations must embed UnimplementedTraceServiceServer
+// for forward compatibility
+type TraceServiceServer interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
+ mustEmbedUnimplementedTraceServiceServer()
+}
+
+// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct {
+}
+
+func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {}
+
+// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to TraceServiceServer will
+// result in compilation errors.
+type UnsafeTraceServiceServer interface {
+ mustEmbedUnimplementedTraceServiceServer()
+}
+
+func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) {
+ s.RegisterService(&TraceService_ServiceDesc, srv)
+}
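+
+// A sketch of a concrete server; the traceServer type and its trivial
+// Export are invented for illustration:
+//
+//	type traceServer struct {
+//		UnimplementedTraceServiceServer
+//	}
+//
+//	func (traceServer) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+//		return &ExportTraceServiceResponse{}, nil
+//	}
+//
+//	s := grpc.NewServer()
+//	RegisterTraceServiceServer(s, traceServer{})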
+
+func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ExportTraceServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var TraceService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: _TraceService_Export_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
new file mode 100644
index 000000000..852209b09
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -0,0 +1,630 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/common/v1/common.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value is one of the listed fields. It is valid for all values to be unspecified,
+ // in which case this AnyValue is considered to be "empty".
+ //
+ // Types that are assignable to Value:
+ // *AnyValue_StringValue
+ // *AnyValue_BoolValue
+ // *AnyValue_IntValue
+ // *AnyValue_DoubleValue
+ // *AnyValue_ArrayValue
+ // *AnyValue_KvlistValue
+ // *AnyValue_BytesValue
+ Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *AnyValue) Reset() {
+ *x = AnyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AnyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnyValue) ProtoMessage() {}
+
+func (x *AnyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead.
+func (*AnyValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *AnyValue) GetValue() isAnyValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *AnyValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*AnyValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *AnyValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*AnyValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *AnyValue) GetIntValue() int64 {
+ if x, ok := x.GetValue().(*AnyValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (x *AnyValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *AnyValue) GetArrayValue() *ArrayValue {
+ if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok {
+ return x.ArrayValue
+ }
+ return nil
+}
+
+func (x *AnyValue) GetKvlistValue() *KeyValueList {
+ if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok {
+ return x.KvlistValue
+ }
+ return nil
+}
+
+func (x *AnyValue) GetBytesValue() []byte {
+ if x, ok := x.GetValue().(*AnyValue_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+type isAnyValue_Value interface {
+ isAnyValue_Value()
+}
+
+type AnyValue_StringValue struct {
+ StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AnyValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AnyValue_IntValue struct {
+ IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AnyValue_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type AnyValue_ArrayValue struct {
+ ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type AnyValue_KvlistValue struct {
+ KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
+}
+
+type AnyValue_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+
+func (*AnyValue_BoolValue) isAnyValue_Value() {}
+
+func (*AnyValue_IntValue) isAnyValue_Value() {}
+
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+
+func (*AnyValue_ArrayValue) isAnyValue_Value() {}
+
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (*AnyValue_BytesValue) isAnyValue_Value() {}
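+
+// As a sketch, a string attribute wrapped in the oneof looks like this;
+// the key and value literals are examples only:
+//
+//	kv := &KeyValue{
+//		Key:   "service.name",
+//		Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: "checkout"}},
+//	}
+//	_ = kv.GetValue().GetStringValue() // "checkout"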
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Array of values. The array may be empty (contain 0 elements).
+ Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ArrayValue) Reset() {
+ *x = ArrayValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ArrayValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ArrayValue) ProtoMessage() {}
+
+func (x *ArrayValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead.
+func (*ArrayValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ArrayValue) GetValues() []*AnyValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A collection of key-value pairs. The list may be empty (may
+ // contain 0 elements).
+ // The keys MUST be unique (it is not allowed to have more than one
+ // value with the same key).
+ Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *KeyValueList) Reset() {
+ *x = KeyValueList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValueList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueList) ProtoMessage() {}
+
+func (x *KeyValueList) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead.
+func (*KeyValueList) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KeyValueList) GetValues() []*KeyValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+ *x = KeyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *KeyValue) GetValue() *AnyValue {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// InstrumentationScope is a message representing the instrumentation scope information
+// such as the fully qualified name and version.
+type InstrumentationScope struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An empty instrumentation scope name means the name is unknown.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // Additional attributes that describe the scope. [Optional].
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *InstrumentationScope) Reset() {
+ *x = InstrumentationScope{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InstrumentationScope) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstrumentationScope) ProtoMessage() {}
+
+func (x *InstrumentationScope) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead.
+func (*InstrumentationScope) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *InstrumentationScope) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InstrumentationScope) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *InstrumentationScope) GetAttributes() []*KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08,
+ 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d,
+ 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a,
+ 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c,
+ 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d,
+ 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a,
+ 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b,
+ 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14,
+ 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64,
+ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+ 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64,
+ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
+ 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc
+)
+
+func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescData
+}
+
+var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{
+ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue
+ (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue
+ (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList
+ (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue
+ (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope
+}
+var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{
+ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue
+ 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList
+ 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue
+ 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue
+ 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_common_v1_common_proto_init() }
+func file_opentelemetry_proto_common_v1_common_proto_init() {
+ if File_opentelemetry_proto_common_v1_common_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AnyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ArrayValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValueList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InstrumentationScope); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*AnyValue_StringValue)(nil),
+ (*AnyValue_BoolValue)(nil),
+ (*AnyValue_IntValue)(nil),
+ (*AnyValue_DoubleValue)(nil),
+ (*AnyValue_ArrayValue)(nil),
+ (*AnyValue_KvlistValue)(nil),
+ (*AnyValue_BytesValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_common_v1_common_proto = out.File
+ file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil
+ file_opentelemetry_proto_common_v1_common_proto_goTypes = nil
+ file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
new file mode 100644
index 000000000..b7545b03b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -0,0 +1,193 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+ v1 "go.opentelemetry.io/proto/otlp/common/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource information.
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Set of attributes that describe the resource.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+ // no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetAttributes() []*v1.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Resource) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
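+
+// Editorial sketch, not generated output: populating a Resource with a single
+// attribute, using the common/v1 types imported above as "v1" ("service.name"
+// and "example-service" are illustrative values):
+//
+//	res := &Resource{
+//		Attributes: []*v1.KeyValue{{
+//			Key:   "service.name",
+//			Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "example-service"}},
+//		}},
+//	}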
+
+var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+ 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31,
+ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01,
+ 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+ 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc
+)
+
+func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData
+}
+
+var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{
+ (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource
+ (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{
+ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() }
+func file_opentelemetry_proto_resource_v1_resource_proto_init() {
+ if File_opentelemetry_proto_resource_v1_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_resource_v1_resource_proto = out.File
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil
+ file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil
+ file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
new file mode 100644
index 000000000..b342a0a94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -0,0 +1,1283 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+ v11 "go.opentelemetry.io/proto/otlp/common/v1"
+ v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is a protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // The zero value for the enum. Should not be used for comparisons.
+ // Instead use bitwise "and" with the appropriate mask as shown above.
+ SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
+ // Bits 0-7 are used for trace flags.
+ SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
+ SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
+)
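+
+// Editorial sketch, not generated output: extracting the bit-fields described
+// above from a span's Flags value, using only the masks defined in this file:
+//
+//	w3cTraceFlags := span.GetFlags() & uint32(SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
+//	remoteKnown := span.GetFlags()&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
+//	isRemote := span.GetFlags()&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0
+//
+// Per the comment above, isRemote is meaningful only when remoteKnown is true.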
+
+// Enum value maps for SpanFlags.
+var (
+ SpanFlags_name = map[int32]string{
+ 0: "SPAN_FLAGS_DO_NOT_USE",
+ 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
+ 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
+ 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
+ }
+ SpanFlags_value = map[string]int32{
+ "SPAN_FLAGS_DO_NOT_USE": 0,
+ "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
+ "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
+ "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
+ }
+)
+
+func (x SpanFlags) Enum() *SpanFlags {
+ p := new(SpanFlags)
+ *p = x
+ return p
+}
+
+func (x SpanFlags) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SpanFlags) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (SpanFlags) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x SpanFlags) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SpanFlags.Descriptor instead.
+func (SpanFlags) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type Span_SpanKind int32
+
+const (
+ // Unspecified. Do NOT use as default.
+ // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
+ Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ Span_SPAN_KIND_SERVER Span_SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ Span_SPAN_KIND_CLIENT Span_SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
+ // Indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
+)
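+
+// Editorial sketch, not generated output: a span covering server-side handling
+// of an RPC would be marked with the SERVER kind, e.g.:
+//
+//	span.Kind = Span_SPAN_KIND_SERVER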
+
+// Enum value maps for Span_SpanKind.
+var (
+ Span_SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SPAN_KIND_INTERNAL",
+ 2: "SPAN_KIND_SERVER",
+ 3: "SPAN_KIND_CLIENT",
+ 4: "SPAN_KIND_PRODUCER",
+ 5: "SPAN_KIND_CONSUMER",
+ }
+ Span_SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SPAN_KIND_INTERNAL": 1,
+ "SPAN_KIND_SERVER": 2,
+ "SPAN_KIND_CLIENT": 3,
+ "SPAN_KIND_PRODUCER": 4,
+ "SPAN_KIND_CONSUMER": 5,
+ }
+)
+
+func (x Span_SpanKind) Enum() *Span_SpanKind {
+ p := new(Span_SpanKind)
+ *p = x
+ return p
+}
+
+func (x Span_SpanKind) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+}
+
+func (Span_SpanKind) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+}
+
+func (x Span_SpanKind) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_SpanKind.Descriptor instead.
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type Status_StatusCode int32
+
+const (
+ // The default status.
+ Status_STATUS_CODE_UNSET Status_StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ Status_STATUS_CODE_OK Status_StatusCode = 1
+ // The Span contains an error.
+ Status_STATUS_CODE_ERROR Status_StatusCode = 2
+)
+
+// Enum value maps for Status_StatusCode.
+var (
+ Status_StatusCode_name = map[int32]string{
+ 0: "STATUS_CODE_UNSET",
+ 1: "STATUS_CODE_OK",
+ 2: "STATUS_CODE_ERROR",
+ }
+ Status_StatusCode_value = map[string]int32{
+ "STATUS_CODE_UNSET": 0,
+ "STATUS_CODE_OK": 1,
+ "STATUS_CODE_ERROR": 2,
+ }
+)
+
+func (x Status_StatusCode) Enum() *Status_StatusCode {
+ p := new(Status_StatusCode)
+ *p = x
+ return p
+}
+
+func (x Status_StatusCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
+}
+
+func (Status_StatusCode) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2]
+}
+
+func (x Status_StatusCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status_StatusCode.Descriptor instead.
+func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// TracesData represents the traces data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type TracesData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *TracesData) Reset() {
+ *x = TracesData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TracesData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TracesData) ProtoMessage() {}
+
+func (x *TracesData) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TracesData.ProtoReflect.Descriptor instead.
+func (*TracesData) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TracesData) GetResourceSpans() []*ResourceSpans {
+ if x != nil {
+ return x.ResourceSpans
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the resource data
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceSpans) Reset() {
+ *x = ResourceSpans{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceSpans) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceSpans) ProtoMessage() {}
+
+func (x *ResourceSpans) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead.
+func (*ResourceSpans) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceSpans) GetResource() *v1.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans {
+ if x != nil {
+ return x.ScopeSpans
+ }
+ return nil
+}
+
+func (x *ResourceSpans) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The instrumentation scope information for the spans in this message.
+ // Semantically, when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeSpans) Reset() {
+ *x = ScopeSpans{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopeSpans) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeSpans) ProtoMessage() {}
+
+func (x *ScopeSpans) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead.
+func (*ScopeSpans) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeSpans) GetScope() *v11.InstrumentationScope {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *ScopeSpans) GetSpans() []*Span {
+ if x != nil {
+ return x.Spans
+ }
+ return nil
+}
+
+func (x *ScopeSpans) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
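+
+// Editorial sketch, not generated output: the containment chain documented
+// above is TracesData -> ResourceSpans -> ScopeSpans -> Span (with Resource
+// and InstrumentationScope imported as "v1" and "v11"). A minimal payload
+// with illustrative names could be assembled as:
+//
+//	td := &TracesData{
+//		ResourceSpans: []*ResourceSpans{{
+//			Resource: &v1.Resource{},
+//			ScopeSpans: []*ScopeSpans{{
+//				Scope: &v11.InstrumentationScope{Name: "example.io/tracer"},
+//				Spans: []*Span{{Name: "example-operation"}},
+//			}},
+//		}},
+//	}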
+
+// A Span represents a single operation performed by a single component of the system.
+//
+// The next available field id is 17.
+type Span struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags field (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // events is a collection of Event items.
+ Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"`
+}
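+
+// Editorial sketch, not generated output: per the trace_id and span_id rules
+// above, a receiver might validate IDs roughly as (using the standard library
+// "bytes" package):
+//
+//	validTraceID := len(span.GetTraceId()) == 16 && !bytes.Equal(span.GetTraceId(), make([]byte, 16))
+//	validSpanID := len(span.GetSpanId()) == 8 && !bytes.Equal(span.GetSpanId(), make([]byte, 8))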
+
+func (x *Span) Reset() {
+ *x = Span{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span) ProtoMessage() {}
+
+func (x *Span) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span.ProtoReflect.Descriptor instead.
+func (*Span) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Span) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span) GetTraceState() string {
+ if x != nil {
+ return x.TraceState
+ }
+ return ""
+}
+
+func (x *Span) GetParentSpanId() []byte {
+ if x != nil {
+ return x.ParentSpanId
+ }
+ return nil
+}
+
+func (x *Span) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+func (x *Span) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Span) GetKind() Span_SpanKind {
+ if x != nil {
+ return x.Kind
+ }
+ return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (x *Span) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span) GetEndTimeUnixNano() uint64 {
+ if x != nil {
+ return x.EndTimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+func (x *Span) GetEvents() []*Span_Event {
+ if x != nil {
+ return x.Events
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedEventsCount() uint32 {
+ if x != nil {
+ return x.DroppedEventsCount
+ }
+ return 0
+}
+
+func (x *Span) GetLinks() []*Span_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedLinksCount() uint32 {
+ if x != nil {
+ return x.DroppedLinksCount
+ }
+ return 0
+}
+
+func (x *Span) GetStatus() *Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A developer-facing, human-readable error message.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // The status code.
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetCode() Status_StatusCode {
+ if x != nil {
+ return x.Code
+ }
+ return Status_STATUS_CODE_UNSET
+}
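+
+// Editorial sketch, not generated output: recording a failure on a span, with
+// an illustrative message:
+//
+//	span.Status = &Status{
+//		Code:    Status_STATUS_CODE_ERROR,
+//		Message: "upstream request timed out",
+//	}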
+
+// Event is a time-stamped annotation of the span, consisting of a user-supplied
+// text description and key-value pairs.
+type Span_Event struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // time_unix_nano is the time the event occurred.
+ TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Span_Event) Reset() {
+ *x = Span_Event{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Event) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Event) ProtoMessage() {}
+
+func (x *Span_Event) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead.
+func (*Span_Event) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *Span_Event) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span_Event) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Span_Event) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span_Event) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
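+
+// Editorial sketch, not generated output: appending a time-stamped annotation
+// to a span, using the standard library "time" package ("cache.miss" is an
+// illustrative event name):
+//
+//	span.Events = append(span.Events, &Span_Event{
+//		TimeUnixNano: uint64(time.Now().UnixNano()),
+//		Name:         "cache.miss",
+//	})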
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (the 22 most significant bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *Span_Link) Reset() {
+ *x = Span_Link{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Link) ProtoMessage() {}
+
+func (x *Span_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead.
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *Span_Link) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetTraceState() string {
+ if x != nil {
+ return x.TraceState
+ }
+ return ""
+}
+
+func (x *Span_Link) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span_Link) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+func (x *Span_Link) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
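+
+// Editorial sketch, not generated output: linking the current span to a span
+// in another trace (otherTraceID and otherSpanID are illustrative 16- and
+// 8-byte identifiers):
+//
+//	span.Links = append(span.Links, &Span_Link{
+//		TraceId: otherTraceID,
+//		SpanId:  otherSpanID,
+//	})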
+
+var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73,
+ 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a,
+ 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+ 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+ 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73,
+ 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
+ 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
+ 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+ 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
+ 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
+ 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67,
+ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b,
+ 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+ 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06,
+ 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b,
+ 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70,
+ 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69,
+ 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a,
+ 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64,
+ 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53,
+ 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41,
+ 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+ 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41,
+ 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12,
+ 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f,
+ 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+ 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22,
+ 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55,
+ 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12,
+ 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b,
+ 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+ 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a,
+ 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a,
+ 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e,
+ 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50,
+ 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54,
+ 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d,
+ 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46,
+ 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f,
+ 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77,
+ 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc
+)
+
+func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
+}
+
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
+ (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags
+ (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind
+ (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode
+ (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData
+ (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans
+ (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans
+ (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span
+ (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status
+ (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event
+ (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link
+ (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource
+ (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope
+ (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
+ 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+ 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+ 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+ 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+ 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+ 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+ 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+ 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+ 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+ 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 13, // [13:13] is the sub-list for method output_type
+ 13, // [13:13] is the sub-list for method input_type
+ 13, // [13:13] is the sub-list for extension type_name
+ 13, // [13:13] is the sub-list for extension extendee
+ 0, // [0:13] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() }
+func file_opentelemetry_proto_trace_v1_trace_proto_init() {
+ if File_opentelemetry_proto_trace_v1_trace_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TracesData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceSpans); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopeSpans); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Event); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Link); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs,
+ EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes,
+ MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_trace_v1_trace_proto = out.File
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil
+ file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil
+ file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil
+}
diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
new file mode 100644
index 000000000..682de254d
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapgrpc provides a logger that is compatible with grpclog.
+package zapgrpc // import "go.uber.org/zap/zapgrpc"
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86
+const (
+ grpcLvlInfo int = iota
+ grpcLvlWarn
+ grpcLvlError
+ grpcLvlFatal
+)
+
+// _grpcToZapLevel maps gRPC log levels to zap log levels.
+// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
+var _grpcToZapLevel = map[int]zapcore.Level{
+ grpcLvlInfo: zapcore.InfoLevel,
+ grpcLvlWarn: zapcore.WarnLevel,
+ grpcLvlError: zapcore.ErrorLevel,
+ grpcLvlFatal: zapcore.FatalLevel,
+}
+
+// An Option overrides a Logger's default configuration.
+type Option interface {
+ apply(*Logger)
+}
+
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WithDebug configures a Logger to print at zap's DebugLevel instead of
+// InfoLevel.
+// It affects only the Print, Printf, and Println methods, which are used only by the gRPC v1 grpclog.Logger API.
+//
+// Deprecated: use grpclog.SetLoggerV2() for v2 API.
+func WithDebug() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.DebugLevel,
+ print: logger.delegate.Debug,
+ printf: logger.delegate.Debugf,
+ }
+ })
+}
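+
+// A usage sketch: NewLogger(zl, WithDebug()) routes the Print-family output
+// through zap's DebugLevel instead of InfoLevel (zl being any *zap.Logger).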
+
+// withWarn redirects the fatal level to the warn level, which makes testing
+// easier. This is intentionally unexported.
+func withWarn() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.WarnLevel,
+ print: logger.delegate.Warn,
+ printf: logger.delegate.Warnf,
+ }
+ })
+}
+
+// NewLogger returns a new Logger.
+func NewLogger(l *zap.Logger, options ...Option) *Logger {
+ logger := &Logger{
+ delegate: l.Sugar(),
+ levelEnabler: l.Core(),
+ }
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.InfoLevel,
+ print: logger.delegate.Info,
+ printf: logger.delegate.Infof,
+ }
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.FatalLevel,
+ print: logger.delegate.Fatal,
+ printf: logger.delegate.Fatalf,
+ }
+ for _, option := range options {
+ option.apply(logger)
+ }
+ return logger
+}
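+
+// A minimal wiring sketch (it assumes the consuming program imports
+// google.golang.org/grpc/grpclog; this package itself does not):
+//
+//	zl, _ := zap.NewProduction()
+//	grpclog.SetLoggerV2(zapgrpc.NewLogger(zl))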
+
+// printer implements Print, Printf, and Println operations for a Zap level.
+//
+// We use it to customize Debug vs Info, and Warn vs Fatal for Print and Fatal
+// respectively.
+type printer struct {
+ enab zapcore.LevelEnabler
+ level zapcore.Level
+ print func(...interface{})
+ printf func(string, ...interface{})
+}
+
+func (v *printer) Print(args ...interface{}) {
+ v.print(args...)
+}
+
+func (v *printer) Printf(format string, args ...interface{}) {
+ v.printf(format, args...)
+}
+
+func (v *printer) Println(args ...interface{}) {
+ if v.enab.Enabled(v.level) {
+ v.print(sprintln(args))
+ }
+}
+
+// Logger adapts zap's Logger to be compatible with grpclog.LoggerV2 and the deprecated grpclog.Logger.
+type Logger struct {
+ delegate *zap.SugaredLogger
+ levelEnabler zapcore.LevelEnabler
+ print *printer
+ fatal *printer
+ // printToDebug bool
+ // fatalToWarn bool
+}
+
+// Print implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Print(args ...interface{}) {
+ l.print.Print(args...)
+}
+
+// Printf implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Infof].
+func (l *Logger) Printf(format string, args ...interface{}) {
+ l.print.Printf(format, args...)
+}
+
+// Println implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Println(args ...interface{}) {
+ l.print.Println(args...)
+}
+
+// Info implements grpclog.LoggerV2.
+func (l *Logger) Info(args ...interface{}) {
+ l.delegate.Info(args...)
+}
+
+// Infoln implements grpclog.LoggerV2.
+func (l *Logger) Infoln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.InfoLevel) {
+ l.delegate.Info(sprintln(args))
+ }
+}
+
+// Infof implements grpclog.LoggerV2.
+func (l *Logger) Infof(format string, args ...interface{}) {
+ l.delegate.Infof(format, args...)
+}
+
+// Warning implements grpclog.LoggerV2.
+func (l *Logger) Warning(args ...interface{}) {
+ l.delegate.Warn(args...)
+}
+
+// Warningln implements grpclog.LoggerV2.
+func (l *Logger) Warningln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.WarnLevel) {
+ l.delegate.Warn(sprintln(args))
+ }
+}
+
+// Warningf implements grpclog.LoggerV2.
+func (l *Logger) Warningf(format string, args ...interface{}) {
+ l.delegate.Warnf(format, args...)
+}
+
+// Error implements grpclog.LoggerV2.
+func (l *Logger) Error(args ...interface{}) {
+ l.delegate.Error(args...)
+}
+
+// Errorln implements grpclog.LoggerV2.
+func (l *Logger) Errorln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.ErrorLevel) {
+ l.delegate.Error(sprintln(args))
+ }
+}
+
+// Errorf implements grpclog.LoggerV2.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ l.delegate.Errorf(format, args...)
+}
+
+// Fatal implements grpclog.LoggerV2.
+func (l *Logger) Fatal(args ...interface{}) {
+ l.fatal.Print(args...)
+}
+
+// Fatalln implements grpclog.LoggerV2.
+func (l *Logger) Fatalln(args ...interface{}) {
+ l.fatal.Println(args...)
+}
+
+// Fatalf implements grpclog.LoggerV2.
+func (l *Logger) Fatalf(format string, args ...interface{}) {
+ l.fatal.Printf(format, args...)
+}
+
+// V implements grpclog.LoggerV2.
+func (l *Logger) V(level int) bool {
+ return l.levelEnabler.Enabled(_grpcToZapLevel[level])
+}
+
+func sprintln(args []interface{}) string {
+ s := fmt.Sprintln(args...)
+ // Drop the new line character added by Sprintln
+ return s[:len(s)-1]
+}
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 000000000..7348c50c0
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
similarity index 76%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
rename to vendor/go.yaml.in/yaml/v2/README.md
index 53f4139dc..c9388da42 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -1,13 +1,3 @@
-# go-yaml fork
-
-This package is a fork of the go-yaml library and is intended solely for consumption
-by kubernetes projects. In this fork, we plan to support only critical changes required for
-kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
-should be made in the upstream go-yaml library, and we will reject such changes in this fork
-unless we are pulling them from upstream.
-
-This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
-
# YAML support for the Go language
Introduction
@@ -30,18 +20,16 @@ supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
-The import path for the package is *gopkg.in/yaml.v2*.
+The import path for the package is *go.yaml.in/yaml/v2*.
To install it, run:
- go get gopkg.in/yaml.v2
+ go get go.yaml.in/yaml/v2
API documentation
-----------------
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
API stability
-------------
@@ -65,7 +53,7 @@ import (
"fmt"
"log"
- "gopkg.in/yaml.v2"
+ "go.yaml.in/yaml/v2"
)
var data = `
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
similarity index 99%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
rename to vendor/go.yaml.in/yaml/v2/yaml.go
index 30813884c..5248e1263 100644
--- a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -2,7 +2,7 @@
//
// Source code and other details for the project are available at GitHub:
//
-// https://github.com/go-yaml/yaml
+// https://github.com/yaml/go-yaml
//
package yaml
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE
new file mode 100644
index 000000000..2683e4bb1
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE
new file mode 100644
index 000000000..866d74a7a
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md
new file mode 100644
index 000000000..15a85a635
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/README.md
@@ -0,0 +1,171 @@
+go.yaml.in/yaml
+===============
+
+YAML Support for the Go Language
+
+
+## Introduction
+
+The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
+and decode [YAML](https://yaml.org/) values.
+
+It was originally developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
+port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
+parse and generate YAML data quickly and reliably.
+
+
+## Project Status
+
+This project started as a fork of the extremely popular [go-yaml](
+https://github.com/go-yaml/yaml/)
+project, and is being maintained by the official [YAML organization](
+https://github.com/yaml/).
+
+The YAML team took over ongoing maintenance and development of the project after
+discussion with go-yaml's author, @niemeyer, following his decision to
+[label the project repository as "unmaintained"](
+https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
+
+We have put together a team of dedicated maintainers including representatives
+of go-yaml's most important downstream projects.
+
+We will strive to earn the trust of the various go-yaml forks to switch back to
+this repository as their upstream.
+
+Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
+would like to contribute or be involved.
+
+
+## Compatibility
+
+The `yaml` package supports most of YAML 1.2, but preserves some behavior from
+1.1 for backwards compatibility.
+
+Specifically, v3 of the `yaml` package:
+
+* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
+ decoded into a typed bool value.
+ Otherwise they behave as a string.
+ Booleans in YAML 1.2 are `true`/`false` only (see the sketch after this list).
+* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
+ `0o777` as specified in YAML 1.2, because most parsers still use the old
+ format.
+ Octals in the `0o777` format are supported though, so new files work.
+* Does not support base-60 floats.
+ These are gone from YAML 1.2, and were actually never supported by this
+ package, as they're clearly a poor choice.
+
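+For instance, the YAML 1.1 bool handling above means the same scalar can
+decode to different Go values depending on the target type. A minimal
+sketch (the struct and field names here are illustrative only):
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "go.yaml.in/yaml/v3"
+)
+
+func main() {
+ var typed struct{ V bool }
+ var loose struct{ V string }
+
+ // Into a typed bool, the YAML 1.1 literal `yes` decodes as true.
+ _ = yaml.Unmarshal([]byte("v: yes"), &typed)
+ // Into a string, the same scalar stays as the text "yes".
+ _ = yaml.Unmarshal([]byte("v: yes"), &loose)
+
+ fmt.Println(typed.V, loose.V) // true yes
+}
+```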
+
+## Installation and Usage
+
+The import path for the package is *go.yaml.in/yaml/v3*.
+
+To install it, run:
+
+```bash
+go get go.yaml.in/yaml/v3
+```
+
+
+## API Documentation
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
+
+
+## API Stability
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](
+https://gopkg.in).
+
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "go.yaml.in/yaml/v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
+
+## License
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go
new file mode 100644
index 000000000..05fd305da
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue to the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go
new file mode 100644
index 000000000..02e2b17bf
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/decode.go
@@ -0,0 +1,1018 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ // It's a curious choice of the underlying API to generally return a
+ // positive result on success, yet in this case to return true in an error
+ // scenario. This was the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+
+ mergedFields map[interface{}]bool
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it unmarshalled into the expected types.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
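+
+// For example, midway through the range at decodeCount == 2,200,000 the
+// formula above yields 0.99 - 0.89*0.5 == 0.545, so roughly half of all
+// decode operations may still come from alias expansion at that size.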
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
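+
+// As an illustration of the cycle check above, in the document below the
+// anchored value contains its own alias, so decoding fails with
+// "anchor 'x' value contains itself":
+//
+//	a: &x
+//	  b: *x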
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should reject any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
+ mapIsNew := false
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ mapIsNew = true
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if d.getPossiblyUnhashableKey(mergedFields, ki) {
+ continue
+ }
+ d.setPossiblyUnhashableKey(mergedFields, ki, true)
+ }
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
+ return false
+ }
+ }
+ return true
+}
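+
+// For example, decoding `{a: 1, b: 2}` into an interface{} yields a
+// map[string]interface{} since every key resolves to a string, while
+// `{1: x}` falls back to map[interface{}]interface{}.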
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ m[key] = value
+}
+
+func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool {
+ defer func() {
+ if err := recover(); err != nil {
+ failf("%v", err)
+ }
+ }()
+ return m[key]
+}
+
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true)
+ }
+ }
+ }
+
+ switch merge.Kind {
+ case MappingNode:
+ d.unmarshal(merge, out)
+ case AliasNode:
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(merge, out)
+ case SequenceNode:
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+
+ d.mergedFields = mergedFields
+}
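+
+// For reference, merge accepts a mapping (typically via an alias) or a
+// sequence of mappings as the value of the "<<" key:
+//
+//	<<: *defaults
+//	<<: [*defaults, *overrides]
+//
+// Anything else fails via failWantMap above.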
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go
new file mode 100644
index 000000000..ab4e03ba7
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/emitterc.go
@@ -0,0 +1,2054 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
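+// The buffer_pos+5 checks here and in put, put_break and write leave enough
+// headroom for the largest single write (a UTF-8 character of up to four
+// bytes, or a two-byte CRLF break) before a flush is forced.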
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
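+//
+// This buffering is what lets the emitter notice, for instance, that a
+// SEQUENCE-START is immediately followed by SEQUENCE-END, so the empty
+// sequence can be emitted in flow style (see yaml_emitter_check_empty_sequence).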
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+ if compact_seq {
+ // compact_seq is false for almost every caller; it is set only when we
+ // are dealing with sequence nodes. Subtracting 2 here accounts for the
+ // "- " indicator at the beginning of each sequence item, which is then
+ // treated as part of the indentation.
+ emitter.indent = emitter.indent - 2
+ }
+ }
+ }
+ return true
+}
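+
+// For example, with best_indent == 2 and a current indent of 2, the block
+// branch above computes 2*((2+2)/2) == 4; with compact_seq set this becomes
+// 4-2 == 2, so the "- " indicator itself counts as part of the indent.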
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// yaml_emitter_increase_indent preserves the original signature and delegates to
+// yaml_emitter_increase_indent_compact without compact-sequence indentation.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false)
+}
+
+// yaml_emitter_process_line_comment preserves the original signature and delegates to
+// yaml_emitter_process_line_comment_linebreak, passing false for linebreak.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ return yaml_emitter_process_line_comment_linebreak(emitter, false)
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ // emitter.mapping_context tells us if we are currently in a mapping context.
+ // emitter.column tells us which column of the yaml output we are in; 0 is the first char of the line.
+ // emitter.indention tells us if the last character written was an indentation character.
+ // emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+ // So, `seq` means that we are in a mapping context, we are either at the first char of the line or
+ // the last character was not an indentation character, and we consider '- ' part of the indentation
+ // for sequence elements.
+ seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+ emitter.compact_sequence_indent
+ if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] Line comments are generally associated with the value, but when there's
+ // no value on the same line as a mapping key they end up attached to the
+ // key itself.
+ if event.typ == yaml_SCALAR_EVENT {
+ if len(emitter.line_comment) == 0 {
+ // A scalar is coming and it has no line comments by itself yet,
+ // so just let it handle the line comment as usual. If it has a
+ // line comment, we can't have both, so the one from the key is lost.
+ emitter.line_comment = emitter.key_line_comment
+ emitter.key_line_comment = nil
+ }
+ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+ // An indented block follows, so write the comment right now.
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
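+
+// In short, the chosen style degrades from plain to single-quoted to
+// double-quoted as the scalar analysis above rules out each form, and the
+// block styles (literal and folded) are only kept outside flow and
+// simple-key contexts.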
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool {
+ if len(emitter.line_comment) == 0 {
+ // The next 3 lines are needed to resolve an issue with leading newlines
+ // See https://github.com/go-yaml/yaml/issues/755
+ // When linebreak is set to true, put_break will be called and will add
+ // the needed newline.
+ if linebreak && !put_break(emitter) {
+ return false
+ }
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
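+// (The BOM is U+FEFF, encoded in UTF-8 as the three bytes 0xEF 0xBB 0xBF.)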
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
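+
+// For illustration of the hints above: a block scalar value with no trailing
+// line break is emitted with the "-" (strip) chomping indicator, a value
+// ending in exactly one break needs no indicator (clip), and a value ending
+// in two or more breaks, or consisting only of a break, gets "+" (keep).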
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment_linebreak(emitter, true) {
+ return false
+ }
+
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go
new file mode 100644
index 000000000..de9e72a3e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
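+
+// A rough usage sketch for these constructors (the exported Marshal wrapper
+// elsewhere in this package drives an encoder in essentially this shape,
+// modulo error handling):
+//
+//    e := newEncoder()
+//    defer e.destroy()
+//    e.marshalDoc("", reflect.ValueOf(v))
+//    e.finish()
+//    // e.out now holds the encoded YAML document.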
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ if !in.CanAddr() {
+ var n = reflect.New(in.Type()).Elem()
+ n.Set(in)
+ in = n
+ }
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should be marshalled quoted
+// for the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
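+
+// For example, plain scalars such as 1:20 (80 under YAML 1.1 sexagesimal
+// rules) or 190:20:30.15 match and are therefore emitted quoted.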
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output stays valid for
+// YAML 1.1 parsers.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
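+
+// For example, a YAML 1.1 parser reads the plain scalar on as the boolean
+// true, so stringv below emits such strings quoted.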
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
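+
+// For illustration of the precision handling above: float32(0.1) widens to
+// roughly 0.10000000149011612 as a float64, and formatting it with bitSize 32
+// is what recovers the short string "0.1".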
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value has been streamed. The last tail
+ // is processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go
new file mode 100644
index 000000000..25fe82363
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/parserc.go
@@ -0,0 +1,1274 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
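+//
+// For example, the stream "--- [a, b]\n" is tokenized as STREAM-START,
+// DOCUMENT-START, FLOW-SEQUENCE-START, SCALAR(a), FLOW-ENTRY, SCALAR(b),
+// FLOW-SEQUENCE-END, STREAM-END: a single explicit_document whose
+// block_node is a flow_collection.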
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments recorded at or before the position of the provided token into the
+// respective top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow-up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
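+
+// Each yaml_parser_parse_* function below emits at most one event per call
+// and records where parsing resumes, either directly in parser.state or by
+// pushing the follow-up state onto parser.states.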
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the head comment backwards; if an empty line is found, split
+ // the comment so the part before the last empty line goes into the
+ // document header, while the remainder goes into a follow-up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// ***********
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *************
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+//
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+//
+// block_node ::= ALIAS
+//
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+//
+// flow_node ::= ALIAS
+//
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+//
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+//
+// *************************
+//
+// block_content ::= block_collection | flow_collection | SCALAR
+//
+// ******
+//
+// flow_content ::= flow_collection | SCALAR
+//
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only in style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
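+
+// For illustration, parsing the node `!!str &a hello` should produce a single
+// SCALAR event: the "!!" handle resolves against the default tag directives
+// to the tag "tag:yaml.org,2002:str", the anchor is "a", the value is
+// "hello", and implicit is false because an explicit tag was given.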
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//
+// ******************** *********** * *********
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+//
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case the
+// previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+ if stem_len == 0 {
+ return
+ }
+
+ token := peek_token(parser)
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ return
+ }
+
+ parser.stem_comment = parser.head_comment[:stem_len]
+ if len(parser.head_comment) == stem_len {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+ }
+}
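+
+// For example, given:
+//
+//	# head
+//	- foo: bar
+//
+// the "# head" comment should be split off as the stem comment when the '-'
+// entry is consumed, and yaml_parser_parse_node then attaches it as the head
+// comment of the MAPPING-START event for the inner mapping rather than to the
+// first key inside it.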
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left over from the prior mapping value processed. Emit
+ // an event for it, since it belongs with that value and not with the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
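+
+// As a rough sketch of the tail comment case above, with input along the
+// lines of:
+//
+//	a: 1
+//	  # trailing
+//	b: 2
+//
+// the scanner should attribute "# trailing" to the value of "a", so a
+// TAIL_COMMENT_EVENT carrying it is emitted before the KEY token for "b" is
+// processed.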
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+//
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+//
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *** *
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// ***** *
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//
+// *
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+//
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+//
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - *** *
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// - ***** *
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
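+
+// With these defaults, a "!!int" tag resolves to "tag:yaml.org,2002:int" and
+// a primary-handle tag such as "!local" resolves to "!local", unless a %TAG
+// directive in the document overrides the "!" or "!!" handle.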
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
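+
+// For illustration, a document prologue such as (handle and prefix made up):
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2002:
+//	---
+//
+// should yield a version directive of {1, 1} and a returned tag_directives
+// list holding only {"!e!", "tag:example.com,2002:"}; the two default
+// directives are appended to parser.tag_directives with allow_duplicates set,
+// but are not included in the returned list.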
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go
new file mode 100644
index 000000000..56af24536
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
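+
+// For example, a stream beginning with the bytes 0xFE 0xFF is read as
+// UTF-16BE with the two BOM bytes skipped, while a stream without a
+// recognizable BOM falls back to UTF-8.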
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that this must be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either (in Go) panicking or (in C) accessing invalid memory.
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is either (in Go) panicking or (in C) accessing invalid memory.
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go
new file mode 100644
index 000000000..64ae88805
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
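+
+// For example, shortTag("tag:yaml.org,2002:str") yields "!!str" and
+// longTag("!!str") yields "tag:yaml.org,2002:str", while a tag outside both
+// families, such as "!custom", passes through unchanged.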
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
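+
+// For illustration, with an empty tag (implicit resolution) the following
+// results are expected:
+//
+//	resolve("", "true")  // boolTag, true
+//	resolve("", "123")   // intTag, 123
+//	resolve("", "0o777") // intTag, 511
+//	resolve("", "12.5")  // floatTag, 12.5
+//	resolve("", "~")     // nullTag, nil
+//	resolve("", "hello") // strTag, "hello"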
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
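+
+// For example, an input whose encoding is shorter than 70 characters comes
+// back as a single line with no trailing newline; anything longer is emitted
+// in chunks of up to 70 characters, each chunk (including the last) followed
+// by '\n'.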
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
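+
+// For example, parseTimestamp should accept "2001-12-15T02:59:43.1Z",
+// "2001-12-14 21:59:43.10", and "2002-12-14", but reject "14-12-2002" (the
+// YYYY- prefix check fails) and "2001-12-14 21:59:43.10 -5" (no matching
+// layout, per the note above).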
diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go
new file mode 100644
index 000000000..30b1f0892
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// There are really only two parts of scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
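+
+// Illustrative sketch (not part of the upstream API): a minimal loop that
+// drains every token from a parser whose input was set up with the helper
+// functions yaml_parser_initialize, yaml_parser_set_input_string and
+// yaml_parser_delete defined elsewhere in this package.
+func example_scan_all(input []byte) bool {
+ var parser yaml_parser_t
+ if !yaml_parser_initialize(&parser) {
+ return false
+ }
+ defer yaml_parser_delete(&parser)
+ yaml_parser_set_input_string(&parser, input)
+ for {
+ var token yaml_token_t
+ // yaml_parser_scan reports scanner errors by returning false.
+ if !yaml_parser_scan(&parser, &token) {
+ return false
+ }
+ // STREAM-END is always the last token of a well-formed stream.
+ if token.typ == yaml_STREAM_END_TOKEN {
+ return true
+ }
+ }
+}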
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
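+// trace is a debugging helper: it prints its arguments prefixed with "+++"
+// on entry and returns a closure that prints them prefixed with "---", so
+// it can be used as, for example: defer trace("fetch_next_token")().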
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+ // so that foot comments may be parsed in time to associate them with
+ // the tokens parsed before them, and also so that line comments may
+ // be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+ // A sequence indicator alone has no line comment; any comment found
+ // here becomes a head comment for whatever follows.
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by this point, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
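+
+// For example (illustrative), the would-be key below can never be a simple
+// key, because its ':' does not appear on the same line the key started on:
+//
+// a plain scalar
+// that wraps : value
+//
+// yaml_simple_key_is_valid drops such a stale candidate (or reports an
+// error if the key was required) rather than turning the multi-line scalar
+// into an implicit key.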
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the depth of nested flow collections.
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force a new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser is
+ // able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
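+
+// For example (illustrative), while scanning "a: 1" in the block context
+// the scanner first saves "a" as a potential simple key and emits its
+// SCALAR token. Only on reaching ':' does yaml_parser_fetch_value insert
+// the KEY token (and, if needed, BLOCK-MAPPING-START) back at the saved
+// token number, before appending the VALUE token at the current position.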
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header for the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was on the prior line, reposition it so it becomes a
+ // header of the follow-up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^ (the major version number)
+// %YAML 1.1 # a comment \n
+// ^ (the minor version number)
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
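+
+// For example (illustrative), "%YAML 1.1" yields major=1 and minor=1. Each
+// component may be at most max_number_length (2) digits long, so a version
+// such as "%YAML 123.1" is rejected as extremely long.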
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
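+
+// For example (illustrative), scanning the directive
+//
+// %TAG !e! tag:example.com,2000:app/
+//
+// produces the handle "!e!" and the prefix "tag:example.com,2000:app/".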
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
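+
+// For example (illustrative), the three tag forms scan as follows:
+//
+// !<tag:yaml.org,2002:str> -> handle "", suffix "tag:yaml.org,2002:str"
+// !!str -> handle "!!", suffix "str"
+// !local -> handle "!", suffix "local"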
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be a part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag URI.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of octets. w starts at the sentinel value
+ // 1024 and is replaced by the UTF-8 sequence width once the leading
+ // octet has been decoded.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
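+
+// For example (illustrative), the escaped pair "%C3%A9" decodes to the two
+// octets 0xC3 0xA9, the UTF-8 encoding of 'é': the leading octet sets
+// w = width(0xC3) = 2, and the trailing octet must match the 10xxxxxx form.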
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_line_comment(parser, start_mark) {
+ return false
+ }
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
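+
+// For example (illustrative), with input "key: |+\n  text\n\n" the scalar
+// keeps all trailing line breaks ("text\n\n"); with "key: |\n  text\n\n"
+// (clipping, the default) it keeps a single one ("text\n"); and with
+// "key: |-\n  text\n\n" (stripping) it keeps none ("text").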
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
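+
+// For example (illustrative), with no indentation indicator in
+//
+// key: |
+//     text
+//
+// the indent is auto-detected as 4, the column reached on the first
+// non-empty line (max_indent), clamped to at least parser.indent+1 and
+// never below 1.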
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
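+ // Encode the code point as UTF-8: one byte up to U+007F, two bytes
+ // up to U+07FF, three bytes up to U+FFFF, and four bytes beyond.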
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+ var next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start in order
+ // to still be considered a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line - parser.newlines + 1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+ // Got line break or terminator.
+ if close_flow || !recent_empty {
+ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ // Alternatively, this might also be the last comment inside a flow
+ // scope, so it must be a footer.
+ if len(text) > 0 {
+ if start_mark.column-1 < next_indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
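+
+// In short: comment text that starts on the foot line of the prior token, or
+// that is dedented below the upcoming indentation, is recorded as a foot
+// comment of the preceding content, while the remaining comment lines
+// accumulate as the head comment of the following token.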
diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go
new file mode 100644
index 000000000..9210ece7e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
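+
+// As an illustration, runs of digits inside string keys compare numerically
+// here, so keys sort as "item1" < "item2" < "item10" rather than in plain
+// lexical order ("item1" < "item10" < "item2").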
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go
new file mode 100644
index 000000000..266d0b092
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go
new file mode 100644
index 000000000..0b101cd20
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yaml.go
@@ -0,0 +1,703 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/yaml/go-yaml
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
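+
+// A minimal usage sketch, assuming a simple target struct:
+//
+//	var t struct{ A int }
+//	dec := yaml.NewDecoder(strings.NewReader("a: 1\nextra: 2"))
+//	dec.KnownFields(true)
+//	err := dec.Decode(&t) // fails: "extra" has no matching field in t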
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// by a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
+func (e *Encoder) CompactSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = true
+}
+
+// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
+func (e *Encoder) DefaultSeqIndent() {
+ e.encoder.emitter.compact_sequence_indent = false
+}
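+
+// A rough sketch of the difference, assuming SetIndent(2): with
+// CompactSeqIndent a sequence under a mapping key renders as
+//
+//	key:
+//	- item
+//
+// while with DefaultSeqIndent it renders as
+//
+//	key:
+//	  - item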
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream-terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// partially unmarshaled.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line the node is on.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() {
+ return nullTag
+ }
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
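+
+// For example, a plain ScalarNode with Value "123" and an empty Tag resolves
+// to "!!int", while the same value in a quoted style is indicated as a
+// string and resolves to "!!str".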
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
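+
+// A small usage sketch: values containing a newline pick up the literal
+// block style automatically.
+//
+//	var n yaml.Node
+//	n.SetString("first\nsecond")
+//	// n.Kind == ScalarNode, n.Tag == "!!str", n.Style == LiteralStyle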
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
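+
+// For example, a field declared as
+//
+//	Name string `yaml:"name,omitempty"`
+//
+// yields a fieldInfo with Key "name" and OmitEmpty set, while an untagged
+// field falls back to the lowercased Go field name as its key.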
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
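+
+// For the struct case above: a value such as struct{ N int; S string }{} is
+// zero because all of its exported fields are zero, while setting any single
+// field makes the whole value non-zero for omitempty purposes.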
diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go
new file mode 100644
index 000000000..f59aa40f6
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlh.go
@@ -0,0 +1,811 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to application data specified by
+// yaml_parser_set_input().
+//
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position in the raw buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data A pointer to an application data specified by
+//
+// yaml_emitter_set_output().
+//
+// @param[in] buffer The buffer with bytes to be written.
+// @param[in] size The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
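+
+// Illustrative sketch (not part of the upstream file): a write handler
+// that appends the flushed bytes to an in-memory slice, matching the
+// string-output mode implied by the output_buffer field below.
+func example_buffer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ // Appending to a slice cannot fail, so always report success.
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}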
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position in the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
new file mode 100644
index 000000000..dea1ba961
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
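+
+// A rough sizing check (informal; it assumes UTF-16 input is the worst
+// case for expansion): each 2-byte UTF-16 code unit decodes to at most 3
+// UTF-8 bytes, so a full raw buffer of 512 bytes yields at most 768
+// decoded bytes, comfortably inside input_buffer_size (1536). The array
+// below fails to compile if the margin ever goes negative.
+var _ [input_buffer_size - (input_raw_buffer_size/2)*3]byte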
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
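+
+// Worked example (an informal sketch): 0xC2 0xA0 is UTF-8 for U+00A0, the
+// first printable character above ASCII, so it is accepted; 0xC2 0x85
+// encodes U+0085 (NEL), a line break rather than printable content, and
+// is rejected.
+func example_is_printable() (bool, bool) {
+ nbsp := []byte{0xC2, 0xA0} // U+00A0 -> true
+ nel := []byte{0xC2, 0x85} // U+0085 -> false
+ return is_printable(nbsp, 0), is_printable(nel, 0)
+}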
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
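+
+// Worked example (informal): the lead byte alone determines the width of
+// a UTF-8 sequence. 'a' (0x61) starts a 1-byte rune, 0xC2 a 2-byte rune
+// such as U+00A0, 0xE2 a 3-byte rune such as U+2028, and 0xF0 a 4-byte
+// rune; a continuation byte such as 0x85 yields 0.
+func example_width() [5]int {
+ return [5]int{width(0x61), width(0xC2), width(0xE2), width(0xF0), width(0x85)} // 1, 2, 3, 4, 0
+}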
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 000000000..2a7cf70da
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
new file mode 100644
index 000000000..d25979d9f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go
@@ -0,0 +1,825 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cryptobyte
+
+import (
+ encoding_asn1 "encoding/asn1"
+ "fmt"
+ "math/big"
+ "reflect"
+ "time"
+
+ "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+// This file contains ASN.1-related methods for String and Builder.
+
+// Builder
+
+// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1Int64(v int64) {
+ b.addASN1Signed(asn1.INTEGER, v)
+}
+
+// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
+// given tag.
+func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
+ b.addASN1Signed(tag, v)
+}
+
+// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
+func (b *Builder) AddASN1Enum(v int64) {
+ b.addASN1Signed(asn1.ENUM, v)
+}
+
+func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
+ b.AddASN1(tag, func(c *Builder) {
+ length := 1
+ for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
+ length++
+ }
+
+ for ; length > 0; length-- {
+ i := v >> uint((length-1)*8) & 0xff
+ c.AddUint8(uint8(i))
+ }
+ })
+}
+
+// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1Uint64(v uint64) {
+ b.AddASN1(asn1.INTEGER, func(c *Builder) {
+ length := 1
+ for i := v; i >= 0x80; i >>= 8 {
+ length++
+ }
+
+ for ; length > 0; length-- {
+ i := v >> uint((length-1)*8) & 0xff
+ c.AddUint8(uint8(i))
+ }
+ })
+}
+
+// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
+func (b *Builder) AddASN1BigInt(n *big.Int) {
+ if b.err != nil {
+ return
+ }
+
+ b.AddASN1(asn1.INTEGER, func(c *Builder) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement form. So we
+ // invert and subtract 1. If the most-significant-bit isn't set then
+ // we'll need to pad the beginning with 0xff in order to keep the number
+ // negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ c.add(0xff)
+ }
+ c.add(bytes...)
+ } else if n.Sign() == 0 {
+ c.add(0)
+ } else {
+ bytes := n.Bytes()
+ if bytes[0]&0x80 != 0 {
+ c.add(0)
+ }
+ c.add(bytes...)
+ }
+ })
+}
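+
+// Worked example (informal; the expected bytes follow from the rules
+// above): for -129, negate and subtract one to get 128 (0x80), invert to
+// 0x7F, then prepend 0xFF to keep the sign bit set, giving the DER
+// INTEGER 02 02 FF 7F.
+func exampleAddASN1BigInt() []byte {
+ b := NewBuilder(nil)
+ b.AddASN1BigInt(big.NewInt(-129))
+ out, _ := b.Bytes()
+ return out // []byte{0x02, 0x02, 0xFF, 0x7F}
+}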
+
+// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
+func (b *Builder) AddASN1OctetString(bytes []byte) {
+ b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
+ c.AddBytes(bytes)
+ })
+}
+
+const generalizedTimeFormatStr = "20060102150405Z0700"
+
+// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
+func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
+ if t.Year() < 0 || t.Year() > 9999 {
+ b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
+ return
+ }
+ b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
+ c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
+ })
+}
+
+// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
+func (b *Builder) AddASN1UTCTime(t time.Time) {
+ b.AddASN1(asn1.UTCTime, func(c *Builder) {
+ // As utilized by the X.509 profile, UTCTime can only
+ // represent the years 1950 through 2049.
+ if t.Year() < 1950 || t.Year() >= 2050 {
+ b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
+ return
+ }
+ c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
+ })
+}
+
+// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
+// support BIT STRINGs that are not a whole number of bytes.
+func (b *Builder) AddASN1BitString(data []byte) {
+ b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
+ b.AddUint8(0)
+ b.AddBytes(data)
+ })
+}
+
+func (b *Builder) addBase128Int(n int64) {
+ var length int
+ if n == 0 {
+ length = 1
+ } else {
+ for i := n; i > 0; i >>= 7 {
+ length++
+ }
+ }
+
+ for i := length - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+
+ b.add(o)
+ }
+}
+
+func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
+ if len(oid) < 2 {
+ return false
+ }
+
+ if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
+ return false
+ }
+
+ for _, v := range oid {
+ if v < 0 {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
+ b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
+ if !isValidOID(oid) {
+ b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
+ return
+ }
+
+ b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
+ for _, v := range oid[2:] {
+ b.addBase128Int(int64(v))
+ }
+ })
+}
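+
+// Worked example (informal): for 1.2.840.113549 the first two components
+// pack into 1*40+2 = 42 (0x2A); 840 splits into base-128 groups 6 and
+// 0x48, encoded 0x86 0x48; 113549 splits into 6, 0x77, 0x0D, encoded
+// 0x86 0xF7 0x0D.
+func exampleAddASN1OID() []byte {
+ b := NewBuilder(nil)
+ b.AddASN1ObjectIdentifier(encoding_asn1.ObjectIdentifier{1, 2, 840, 113549})
+ out, _ := b.Bytes()
+ return out // []byte{0x06, 0x06, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D}
+}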
+
+func (b *Builder) AddASN1Boolean(v bool) {
+ b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
+ if v {
+ b.AddUint8(0xff)
+ } else {
+ b.AddUint8(0)
+ }
+ })
+}
+
+func (b *Builder) AddASN1NULL() {
+ b.add(uint8(asn1.NULL), 0)
+}
+
+// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
+// successful or records an error if one occurred.
+func (b *Builder) MarshalASN1(v interface{}) {
+ // NOTE(martinkr): This is somewhat of a hack to allow propagation of
+ // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
+ // value embedded into a struct, its tag information is lost.
+ if b.err != nil {
+ return
+ }
+ bytes, err := encoding_asn1.Marshal(v)
+ if err != nil {
+ b.err = err
+ return
+ }
+ b.AddBytes(bytes)
+}
+
+// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
+// Tags greater than 30 are not supported and result in an error (i.e.
+// low-tag-number form only). The child builder passed to the
+// BuilderContinuation can be used to build the content of the ASN.1 object.
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
+ if b.err != nil {
+ return
+ }
+ // Identifiers with the low five bits set indicate high-tag-number format
+ // (two or more octets), which we don't support.
+ if tag&0x1f == 0x1f {
+ b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag)
+ return
+ }
+ b.AddUint8(uint8(tag))
+ b.addLengthPrefixed(1, true, f)
+}
+
+// String
+
+// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
+// representation into out and advances. It reports whether the read
+// was successful.
+func (s *String) ReadASN1Boolean(out *bool) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
+ return false
+ }
+
+ switch bytes[0] {
+ case 0:
+ *out = false
+ case 0xff:
+ *out = true
+ default:
+ return false
+ }
+
+ return true
+}
+
+// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
+// not point to an integer, to a big.Int, or to a []byte it panics. Only
+// positive and zero values can be decoded into []byte, and they are returned as
+// big-endian binary values that share memory with s. Positive values will have
+// no leading zeroes, and zero will be returned as a single zero byte.
+// ReadASN1Integer reports whether the read was successful.
+func (s *String) ReadASN1Integer(out interface{}) bool {
+ switch out := out.(type) {
+ case *int, *int8, *int16, *int32, *int64:
+ var i int64
+ if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
+ return false
+ }
+ reflect.ValueOf(out).Elem().SetInt(i)
+ return true
+ case *uint, *uint8, *uint16, *uint32, *uint64:
+ var u uint64
+ if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
+ return false
+ }
+ reflect.ValueOf(out).Elem().SetUint(u)
+ return true
+ case *big.Int:
+ return s.readASN1BigInt(out)
+ case *[]byte:
+ return s.readASN1Bytes(out)
+ default:
+ panic("out does not point to an integer type")
+ }
+}
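+
+// Usage sketch (informal): parsing the DER INTEGER 02 01 2A yields 42.
+func exampleReadASN1Integer() (int64, bool) {
+ input := String([]byte{0x02, 0x01, 0x2A})
+ var v int64
+ ok := input.ReadASN1Integer(&v)
+ return v, ok // 42, true
+}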
+
+func checkASN1Integer(bytes []byte) bool {
+ if len(bytes) == 0 {
+ // An INTEGER is encoded with at least one octet.
+ return false
+ }
+ if len(bytes) == 1 {
+ return true
+ }
+ if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
+ // Value is not minimally encoded.
+ return false
+ }
+ return true
+}
+
+var bigOne = big.NewInt(1)
+
+func (s *String) readASN1BigInt(out *big.Int) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ // Negative number.
+ neg := make([]byte, len(bytes))
+ for i, b := range bytes {
+ neg[i] = ^b
+ }
+ out.SetBytes(neg)
+ out.Add(out, bigOne)
+ out.Neg(out)
+ } else {
+ out.SetBytes(bytes)
+ }
+ return true
+}
+
+func (s *String) readASN1Bytes(out *[]byte) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
+ return false
+ }
+ if bytes[0]&0x80 == 0x80 {
+ return false
+ }
+ for len(bytes) > 1 && bytes[0] == 0 {
+ bytes = bytes[1:]
+ }
+ *out = bytes
+ return true
+}
+
+func (s *String) readASN1Int64(out *int64) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
+ return false
+ }
+ return true
+}
+
+func asn1Signed(out *int64, n []byte) bool {
+ length := len(n)
+ if length > 8 {
+ return false
+ }
+ for i := 0; i < length; i++ {
+ *out <<= 8
+ *out |= int64(n[i])
+ }
+ // Shift up and down in order to sign extend the result.
+ *out <<= 64 - uint8(length)*8
+ *out >>= 64 - uint8(length)*8
+ return true
+}
+
+func (s *String) readASN1Uint64(out *uint64) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
+ return false
+ }
+ return true
+}
+
+func asn1Unsigned(out *uint64, n []byte) bool {
+ length := len(n)
+ if length > 9 || length == 9 && n[0] != 0 {
+ // Too large for uint64.
+ return false
+ }
+ if n[0]&0x80 != 0 {
+ // Negative number.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ *out <<= 8
+ *out |= uint64(n[i])
+ }
+ return true
+}
+
+// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
+// and advances. It reports whether the read was successful and resulted in a
+// value that can be represented in an int64.
+func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
+ var bytes String
+ return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
+}
+
+// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
+// whether the read was successful.
+func (s *String) ReadASN1Enum(out *int) bool {
+ var bytes String
+ var i int64
+ if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
+ return false
+ }
+ if int64(int(i)) != i {
+ return false
+ }
+ *out = int(i)
+ return true
+}
+
+func (s *String) readBase128Int(out *int) bool {
+ ret := 0
+ for i := 0; len(*s) > 0; i++ {
+ if i == 5 {
+ return false
+ }
+ // Avoid overflowing int on a 32-bit platform.
+ // We don't want different behavior based on the architecture.
+ if ret >= 1<<(31-7) {
+ return false
+ }
+ ret <<= 7
+ b := s.read(1)[0]
+
+ // ITU-T X.690, section 8.19.2:
+ // The subidentifier shall be encoded in the fewest possible octets,
+ // that is, the leading octet of the subidentifier shall not have the value 0x80.
+ if i == 0 && b == 0x80 {
+ return false
+ }
+
+ ret |= int(b & 0x7f)
+ if b&0x80 == 0 {
+ *out = ret
+ return true
+ }
+ }
+ return false // truncated
+}
+
+// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
+// advances. It reports whether the read was successful.
+func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
+ return false
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ components := make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ var v int
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ if v < 80 {
+ components[0] = v / 40
+ components[1] = v % 40
+ } else {
+ components[0] = 2
+ components[1] = v - 80
+ }
+
+ i := 2
+ for ; len(bytes) > 0; i++ {
+ if !bytes.readBase128Int(&v) {
+ return false
+ }
+ components[i] = v
+ }
+ *out = components[:i]
+ return true
+}
+
+// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
+// advances. It reports whether the read was successful.
+func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
+ return false
+ }
+ t := string(bytes)
+ res, err := time.Parse(generalizedTimeFormatStr, t)
+ if err != nil {
+ return false
+ }
+ if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
+ return false
+ }
+ *out = res
+ return true
+}
+
+const defaultUTCTimeFormatStr = "060102150405Z0700"
+
+// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1UTCTime(out *time.Time) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.UTCTime) {
+ return false
+ }
+ t := string(bytes)
+
+ formatStr := defaultUTCTimeFormatStr
+ var err error
+ res, err := time.Parse(formatStr, t)
+ if err != nil {
+ // Fallback to minute precision if we can't parse second
+ // precision. If we are following X.509 or X.690 we shouldn't
+ // support this, but we do.
+ formatStr = "0601021504Z0700"
+ res, err = time.Parse(formatStr, t)
+ }
+ if err != nil {
+ return false
+ }
+
+ if serialized := res.Format(formatStr); serialized != t {
+ return false
+ }
+
+ if res.Year() >= 2050 {
+ // UTCTime interprets the low order digits 50-99 as 1950-99.
+ // This only applies to its use in the X.509 profile.
+ // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ res = res.AddDate(-100, 0, 0)
+ }
+ *out = res
+ return true
+}
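+
+// Worked example (informal): "500101000000Z" first parses as 2050-01-01
+// under Go's two-digit-year rules and is then pivoted back a century, so
+// the result is 1950; "491231235959Z" parses as 2049 and is left alone.
+func exampleReadASN1UTCTime() (int, bool) {
+ der := append([]byte{0x17, 0x0D}, "500101000000Z"...)
+ input := String(der)
+ var t time.Time
+ ok := input.ReadASN1UTCTime(&t)
+ return t.Year(), ok // 1950, true
+}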
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+ var bytes String
+ if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+ len(bytes)*8/8 != len(bytes) {
+ return false
+ }
+
+ paddingBits := bytes[0]
+ bytes = bytes[1:]
+ if paddingBits > 7 ||
+ len(bytes) == 0 && paddingBits != 0 ||
+ len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+ return false
+ }
+
+ out.BitLength = len(bytes)*8 - int(paddingBits)
+ out.Bytes = bytes
+ return true
+}
+
+ if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+ return false
+ }
+
+ lenBytes := String((*s)[2 : 2+lenLen])
+ if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+ return false
+ }
+
+ // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+ // with the minimum number of octets.
+ if len32 < 128 {
+ // Length should have used short-form encoding.
+ return false
+ }
+ if len32>>((lenLen-1)*8) == 0 {
+ // Leading octet is 0. Length should have been at least one byte shorter.
+ return false
+ }
+
+ headerLen = 2 + uint32(lenLen)
+ if headerLen+len32 < len32 {
+ // Overflow.
+ return false
+ }
+ length = headerLen + len32
+ }
+
+ if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+ return false
+ }
+ if skipHeader && !out.Skip(int(headerLen)) {
+ panic("cryptobyte: internal error")
+ }
+
+ return true
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 000000000..90ef6a241
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+//
+// Methods in the cryptobyte package only support the low-tag-number form, i.e.
+// a single identifier octet with bits 7-8 encoding the class and bits 1-6
+// encoding the tag number.
+type Tag uint8
+
+const (
+ classConstructed = 0x20
+ classContextSpecific = 0x80
+)
+
+// Constructed returns t with the constructed class bit set.
+func (t Tag) Constructed() Tag { return t | classConstructed }
+
+// ContextSpecific returns t with the context-specific class bit set.
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
+
+// The following is a list of standard tag and class combinations.
+const (
+ BOOLEAN = Tag(1)
+ INTEGER = Tag(2)
+ BIT_STRING = Tag(3)
+ OCTET_STRING = Tag(4)
+ NULL = Tag(5)
+ OBJECT_IDENTIFIER = Tag(6)
+ ENUM = Tag(10)
+ UTF8String = Tag(12)
+ SEQUENCE = Tag(16 | classConstructed)
+ SET = Tag(17 | classConstructed)
+ PrintableString = Tag(19)
+ T61String = Tag(20)
+ IA5String = Tag(22)
+ UTCTime = Tag(23)
+ GeneralizedTime = Tag(24)
+ GeneralString = Tag(27)
+)
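+
+// Usage sketch (informal): an explicit, constructed context-specific tag
+// [0], as commonly seen in X.509 structures, combines the tag number with
+// the two class bits: 0x00 | 0x20 | 0x80 == 0xA0.
+var exampleExplicitTag0 = Tag(0).Constructed().ContextSpecific() // == 0xA0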
diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go
new file mode 100644
index 000000000..cf254f5f1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go
@@ -0,0 +1,350 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cryptobyte
+
+import (
+ "errors"
+ "fmt"
+)
+
+// A Builder builds byte strings from fixed-length and length-prefixed values.
+// Builders either allocate space as needed, or are ‘fixed’, which means that
+// they write into a given buffer and produce an error if it's exhausted.
+//
+// The zero value is a usable Builder that allocates space as needed.
+//
+// Simple values are marshaled and appended to a Builder using methods on the
+// Builder. Length-prefixed values are marshaled by providing a
+// BuilderContinuation, which is a function that writes the inner contents of
+// the value to a given Builder. See the documentation for BuilderContinuation
+// for details.
+type Builder struct {
+ err error
+ result []byte
+ fixedSize bool
+ child *Builder
+ offset int
+ pendingLenLen int
+ pendingIsASN1 bool
+ inContinuation *bool
+}
+
+// NewBuilder creates a Builder that appends its output to the given buffer.
+// Like append(), the slice will be reallocated if its capacity is exceeded.
+// Use Bytes to get the final buffer.
+func NewBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ }
+}
+
+// NewFixedBuilder creates a Builder that appends its output into the given
+// buffer. This builder does not reallocate the output buffer. Writes that
+// would exceed the buffer's capacity are treated as an error.
+func NewFixedBuilder(buffer []byte) *Builder {
+ return &Builder{
+ result: buffer,
+ fixedSize: true,
+ }
+}
+
+// SetError sets the value to be returned as the error from Bytes. Writes
+// performed after calling SetError are ignored.
+func (b *Builder) SetError(err error) {
+ b.err = err
+}
+
+// Bytes returns the bytes written by the builder or an error if one has
+// occurred during building.
+func (b *Builder) Bytes() ([]byte, error) {
+ if b.err != nil {
+ return nil, b.err
+ }
+ return b.result[b.offset:], nil
+}
+
+// BytesOrPanic returns the bytes written by the builder or panics if an error
+// has occurred during building.
+func (b *Builder) BytesOrPanic() []byte {
+ if b.err != nil {
+ panic(b.err)
+ }
+ return b.result[b.offset:]
+}
+
+// AddUint8 appends an 8-bit value to the byte string.
+func (b *Builder) AddUint8(v uint8) {
+ b.add(byte(v))
+}
+
+// AddUint16 appends a big-endian, 16-bit value to the byte string.
+func (b *Builder) AddUint16(v uint16) {
+ b.add(byte(v>>8), byte(v))
+}
+
+// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
+// byte of the 32-bit input value is silently truncated.
+func (b *Builder) AddUint24(v uint32) {
+ b.add(byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint32 appends a big-endian, 32-bit value to the byte string.
+func (b *Builder) AddUint32(v uint32) {
+ b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint48 appends a big-endian, 48-bit value to the byte string.
+func (b *Builder) AddUint48(v uint64) {
+ b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint64 appends a big-endian, 64-bit value to the byte string.
+func (b *Builder) AddUint64(v uint64) {
+ b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddBytes appends a sequence of bytes to the byte string.
+func (b *Builder) AddBytes(v []byte) {
+ b.add(v...)
+}
+
+// BuilderContinuation is a continuation-passing interface for building
+// length-prefixed byte sequences. Builder methods for length-prefixed
+// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
+// supplied to them. The child builder passed to the continuation can be used
+// to build the content of the length-prefixed sequence. For example:
+//
+// parent := cryptobyte.NewBuilder()
+// parent.AddUint8LengthPrefixed(func (child *Builder) {
+// child.AddUint8(42)
+// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
+// grandchild.AddUint8(5)
+// })
+// })
+//
+// It is an error to write more bytes to the child than allowed by the reserved
+// length prefix. After the continuation returns, the child must be considered
+// invalid, i.e. users must not store any copies or references of the child
+// that outlive the continuation.
+//
+// If the continuation panics with a value of type BuildError then the inner
+// error will be returned as the error from Bytes. If the child panics
+// otherwise then Bytes will repanic with the same value.
+type BuilderContinuation func(child *Builder)
+
+// BuildError wraps an error. If a BuilderContinuation panics with this value,
+// the panic will be recovered and the inner error will be returned from
+// Builder.Bytes.
+type BuildError struct {
+ Err error
+}
+
+// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence.
+func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(1, false, f)
+}
+
+// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(2, false, f)
+}
+
+// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
+func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(3, false, f)
+}
+
+// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
+func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
+ b.addLengthPrefixed(4, false, f)
+}
+
+func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
+ if !*b.inContinuation {
+ *b.inContinuation = true
+
+ defer func() {
+ *b.inContinuation = false
+
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ if buildError, ok := r.(BuildError); ok {
+ b.err = buildError.Err
+ } else {
+ panic(r)
+ }
+ }()
+ }
+
+ f(arg)
+}
+
+func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
+ // Subsequent writes can be ignored if the builder has encountered an error.
+ if b.err != nil {
+ return
+ }
+
+ offset := len(b.result)
+ b.add(make([]byte, lenLen)...)
+
+ if b.inContinuation == nil {
+ b.inContinuation = new(bool)
+ }
+
+ b.child = &Builder{
+ result: b.result,
+ fixedSize: b.fixedSize,
+ offset: offset,
+ pendingLenLen: lenLen,
+ pendingIsASN1: isASN1,
+ inContinuation: b.inContinuation,
+ }
+
+ b.callContinuation(f, b.child)
+ b.flushChild()
+ if b.child != nil {
+ panic("cryptobyte: internal error")
+ }
+}
+
+func (b *Builder) flushChild() {
+ if b.child == nil {
+ return
+ }
+ b.child.flushChild()
+ child := b.child
+ b.child = nil
+
+ if child.err != nil {
+ b.err = child.err
+ return
+ }
+
+ length := len(child.result) - child.pendingLenLen - child.offset
+
+ if length < 0 {
+ panic("cryptobyte: internal error") // result unexpectedly shrunk
+ }
+
+ if child.pendingIsASN1 {
+ // For ASN.1, we reserved a single byte for the length. If that turned out
+ // to be incorrect, we have to move the contents along in order to make
+ // space.
+ if child.pendingLenLen != 1 {
+ panic("cryptobyte: internal error")
+ }
+ var lenLen, lenByte uint8
+ if int64(length) > 0xfffffffe {
+ b.err = errors.New("pending ASN.1 child too long")
+ return
+ } else if length > 0xffffff {
+ lenLen = 5
+ lenByte = 0x80 | 4
+ } else if length > 0xffff {
+ lenLen = 4
+ lenByte = 0x80 | 3
+ } else if length > 0xff {
+ lenLen = 3
+ lenByte = 0x80 | 2
+ } else if length > 0x7f {
+ lenLen = 2
+ lenByte = 0x80 | 1
+ } else {
+ lenLen = 1
+ lenByte = uint8(length)
+ length = 0
+ }
+
+ // Insert the initial length byte, make space for successive length bytes,
+ // and adjust the offset.
+ child.result[child.offset] = lenByte
+ extraBytes := int(lenLen - 1)
+ if extraBytes != 0 {
+ child.add(make([]byte, extraBytes)...)
+ childStart := child.offset + child.pendingLenLen
+ copy(child.result[childStart+extraBytes:], child.result[childStart:])
+ }
+ child.offset++
+ child.pendingLenLen = extraBytes
+ }
+
+ l := length
+ for i := child.pendingLenLen - 1; i >= 0; i-- {
+ child.result[child.offset+i] = uint8(l)
+ l >>= 8
+ }
+ if l != 0 {
+ b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
+ return
+ }
+
+ if b.fixedSize && &b.result[0] != &child.result[0] {
+ panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
+ }
+
+ b.result = child.result
+}
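+
+// Worked example (informal, using the AddASN1 wrapper from asn1.go): a
+// SEQUENCE holding 300 content bytes needs a long-form length, so the
+// single reserved byte becomes 0x82 (two length octets follow) and the
+// contents shift right to make room for 0x01 0x2C (300):
+//
+// var b cryptobyte.Builder
+// b.AddASN1(asn1.SEQUENCE, func(c *cryptobyte.Builder) {
+// c.AddBytes(make([]byte, 300))
+// })
+// out, _ := b.Bytes() // out[:4] == []byte{0x30, 0x82, 0x01, 0x2C}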
+
+func (b *Builder) add(bytes ...byte) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted write while child is pending")
+ }
+ if len(b.result)+len(bytes) < len(bytes) {
+ b.err = errors.New("cryptobyte: length overflow")
+ }
+ if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
+ b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
+ return
+ }
+ b.result = append(b.result, bytes...)
+}
+
+// Unwrite rolls back non-negative n bytes written directly to the Builder.
+// An attempt by a child builder passed to a continuation to unwrite bytes
+// from its parent will panic.
+func (b *Builder) Unwrite(n int) {
+ if b.err != nil {
+ return
+ }
+ if b.child != nil {
+ panic("cryptobyte: attempted unwrite while child is pending")
+ }
+ length := len(b.result) - b.pendingLenLen - b.offset
+ if length < 0 {
+ panic("cryptobyte: internal error")
+ }
+ if n < 0 {
+ panic("cryptobyte: attempted to unwrite negative number of bytes")
+ }
+ if n > length {
+ panic("cryptobyte: attempted to unwrite more than was written")
+ }
+ b.result = b.result[:len(b.result)-n]
+}
+
+// A MarshalingValue marshals itself into a Builder.
+type MarshalingValue interface {
+ // Marshal is called by Builder.AddValue. It receives a pointer to a builder
+ // to marshal itself into. It may return an error that occurred during
+ // marshaling, such as unset or invalid values.
+ Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+ err := v.Marshal(b)
+ if err != nil {
+ b.err = err
+ }
+}
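+
+// Illustrative sketch (a hypothetical type, not part of this package):
+// implementing MarshalingValue lets a caller write b.AddValue(v) instead
+// of threading continuations by hand.
+//
+// type opaque []byte
+//
+// func (o opaque) Marshal(b *cryptobyte.Builder) error {
+// if len(o) > 255 {
+// return errors.New("opaque: value too long")
+// }
+// b.AddUint8LengthPrefixed(func(c *cryptobyte.Builder) {
+// c.AddBytes(o)
+// })
+// return nil
+// }
+//
+// b.AddValue(opaque("hi")) then appends 0x02 'h' 'i'.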
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
new file mode 100644
index 000000000..4b0f8097f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -0,0 +1,183 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+ if len(*s) < n || n < 0 {
+ return nil
+ }
+ v := (*s)[:n]
+ *s = (*s)[n:]
+ return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+ return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+ v := s.read(1)
+ if v == nil {
+ return false
+ }
+ *out = uint8(v[0])
+ return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+ v := s.read(2)
+ if v == nil {
+ return false
+ }
+ *out = uint16(v[0])<<8 | uint16(v[1])
+ return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint24(out *uint32) bool {
+ v := s.read(3)
+ if v == nil {
+ return false
+ }
+ *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
+ return true
+}
+
+// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint32(out *uint32) bool {
+ v := s.read(4)
+ if v == nil {
+ return false
+ }
+ *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
+ return true
+}
+
+// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint48(out *uint64) bool {
+ v := s.read(6)
+ if v == nil {
+ return false
+ }
+ *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
+ return true
+}
+
+// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint64(out *uint64) bool {
+ v := s.read(8)
+ if v == nil {
+ return false
+ }
+ *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
+ return true
+}
+
+func (s *String) readUnsigned(out *uint32, length int) bool {
+ v := s.read(length)
+ if v == nil {
+ return false
+ }
+ var result uint32
+ for i := 0; i < length; i++ {
+ result <<= 8
+ result |= uint32(v[i])
+ }
+ *out = result
+ return true
+}
+
+func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
+ lenBytes := s.read(lenLen)
+ if lenBytes == nil {
+ return false
+ }
+ var length uint32
+ for _, b := range lenBytes {
+ length = length << 8
+ length = length | uint32(b)
+ }
+ v := s.read(int(length))
+ if v == nil {
+ return false
+ }
+ *outChild = v
+ return true
+}
+
+// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
+// into out and advances over it. It reports whether the read was successful.
+func (s *String) ReadUint8LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(1, out)
+}
+
+// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
+// length-prefixed value into out and advances over it. It reports whether the
+// read was successful.
+func (s *String) ReadUint16LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(2, out)
+}
+
+// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
+// length-prefixed value into out and advances over it. It reports whether
+// the read was successful.
+func (s *String) ReadUint24LengthPrefixed(out *String) bool {
+ return s.readLengthPrefixed(3, out)
+}
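+
+// Usage sketch (informal): TLS-style vectors are parsed by peeling off
+// the big-endian length and reslicing; {0x00, 0x03, 'a', 'b', 'c'} yields
+// the three-byte body "abc" and leaves the outer String empty.
+func exampleReadUint16LengthPrefixed() (String, bool) {
+ s := String([]byte{0x00, 0x03, 'a', 'b', 'c'})
+ var body String
+ ok := s.ReadUint16LengthPrefixed(&body)
+ return body, ok && s.Empty() // "abc", true
+}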
+
+// ReadBytes reads n bytes into out and advances over them. It reports
+// whether the read was successful.
+func (s *String) ReadBytes(out *[]byte, n int) bool {
+ v := s.read(n)
+ if v == nil {
+ return false
+ }
+ *out = v
+ return true
+}
+
+// CopyBytes copies len(out) bytes into out and advances over them. It reports
+// whether the copy operation was successful.
+func (s *String) CopyBytes(out []byte) bool {
+ n := len(out)
+ v := s.read(n)
+ if v == nil {
+ return false
+ }
+ return copy(out, v) == n
+}
+
+// Empty reports whether the string does not contain any bytes.
+func (s String) Empty() bool {
+ return len(s) == 0
+}
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 000000000..2a7cf70da
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000..2c033dff4
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
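
A brief usage sketch: callers typically compose these interfaces into their own constraints, and the `~` terms admit defined types. The `Number` and `Sum` names below are hypothetical, not part of the vendored package.

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// Number is a hypothetical caller-side constraint composed from the
// vendored Integer and Float constraints.
type Number interface {
	constraints.Integer | constraints.Float
}

// Sum works for any slice whose element type satisfies Number,
// including defined types such as `type Meters float64`, thanks to ~.
func Sum[T Number](xs []T) T {
	var total T
	for _, x := range xs {
		total += x
	}
	return total
}

func main() {
	fmt.Println(Sum([]int{1, 2, 3}))      // 6
	fmt.Println(Sum([]float64{0.5, 1.5})) // 2
}
```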
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 000000000..fbf1934a0
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
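
The NaN handling above matches Go 1.21's cmp package: a NaN orders before any non-NaN value, and two NaNs compare as equal. A standalone sketch of the same rule, using only the standard math package:

```go
package main

import (
	"fmt"
	"math"
)

// less mirrors the cmpLess backport above for float64: NaN compares
// below every non-NaN value, and NaN is never less than NaN.
func less(x, y float64) bool {
	return (math.IsNaN(x) && !math.IsNaN(y)) || x < y
}

func main() {
	nan := math.NaN()
	fmt.Println(less(nan, 1))   // true: NaN sorts first
	fmt.Println(less(1, nan))   // false
	fmt.Println(less(nan, nan)) // false: NaNs tie
}
```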
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000..46ceac343
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,515 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using an equality
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[S ~[]E, E comparable](s S, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
+ copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
+ return s2
+ }
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+}
+
+// clearSlice sets all elements up to the length of s to the zero value of E.
+// We may use the builtin clear func instead, and remove clearSlice, when upgrading
+// to Go 1.21+.
+func clearSlice[S ~[]E, E any](s S) {
+ var zero E
+ for i := range s {
+ s[i] = zero
+ }
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j:len(s)] // bounds check
+
+ if i == j {
+ return s
+ }
+
+ oldlen := len(s)
+ s = append(s[:i], s[j:]...)
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+ return s
+}
+
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// DeleteFunc zeroes the elements between the new length and the original length.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations doesn't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// Compact zeroes the elements between the new length and the original length.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Use []E instead of S here to work
+ // around a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates s left by r spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
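
A short usage sketch of the mutating helpers defined above, with illustrative values. Note that Insert, Delete, and Compact all return the modified slice, which the caller must keep:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 2, 3}

	s = slices.Insert(s, 1, 9) // [1 9 2 2 3]
	s = slices.Delete(s, 3, 4) // [1 9 2 3]: removes s[3:4]
	fmt.Println(s, slices.Contains(s, 9), slices.Index(s, 3))

	// Compact collapses consecutive runs of equal elements.
	fmt.Println(slices.Compact([]int{1, 1, 2, 2, 3})) // [1 2 3]
}
```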
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000..f58bbc7ba
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b or when a is not comparable to b in the sense
+// of the formal definition of Strict Weak Ordering.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+// To indicate 'uncomparable', return 0 from the function.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
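
A sketch of the exported entry points above (illustrative values):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []string{"pear", "apple", "plum"}
	slices.Sort(xs)
	fmt.Println(xs, slices.IsSorted(xs)) // [apple pear plum] true

	// BinarySearch reports the insertion position and whether the
	// target is present; the slice must already be sorted.
	i, found := slices.BinarySearch(xs, "banana")
	fmt.Println(i, found) // 1 false
}
```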
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
new file mode 100644
index 000000000..06f2c7a24
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and store its index into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
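
The comparator-based variants above back slices.SortFunc and slices.SortStableFunc. A usage sketch with a hypothetical struct and data:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	us := []user{{"bo", 31}, {"al", 25}, {"cy", 25}}

	// SortStableFunc routes through stableCmpFunc above, so "al"
	// stays before "cy": the two compare equal on age.
	slices.SortStableFunc(us, func(a, b user) int { return a.age - b.age })
	fmt.Println(us) // [{al 25} {cy 25} {bo 31}]
}
```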
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000..99b47c398
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store its index into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
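
These Ordered variants back slices.Sort for any element type whose underlying type is ordered, which is exactly what the `~` terms in constraints.Ordered permit. A brief sketch with a hypothetical defined type:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// Celsius is a hypothetical defined type; the ~float64 term in
// constraints.Ordered lets the Ordered sort path accept it directly.
type Celsius float64

func main() {
	temps := []Celsius{21.5, -3, 10}
	slices.Sort(temps)
	fmt.Println(temps) // [-3 10 21.5]
}
```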
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 000000000..db1c95fab
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancellation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name [context], and migrating to it can be done automatically with [go fix].
+//
+// Incoming requests to a server should create a [Context], and outgoing
+// calls to servers should accept a Context. The chain of function
+// calls between them must propagate the Context, optionally replacing
+// it with a derived Context created using [WithCancel], [WithDeadline],
+// [WithTimeout], or [WithValue].
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. This is discussed further in
+// https://go.dev/blog/context-and-structs. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See https://go.dev/blog/context for example code for a server that uses
+// Contexts.
+//
+// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+// A Context carries a deadline, a cancellation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// Canceled is the error returned by [Context.Err] when the context is canceled
+// for some reason other than its deadline passing.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
+// due to its deadline passing.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).
+func TODO() Context {
+ return todo
+}
+
+var (
+ background = context.Background()
+ todo = context.TODO()
+)
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// A CancelFunc may be called by multiple goroutines simultaneously.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
+
+// WithCancel returns a derived context that points to the parent context
+// but has a new Done channel. The returned context's Done channel is closed
+// when the returned cancel function is called or when the parent context's
+// Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ return context.WithCancel(parent)
+}
+
+// WithDeadline returns a derived context that points to the parent context
+// but has the deadline adjusted to be no later than d. If the parent's
+// deadline is already earlier than d, WithDeadline(parent, d) is semantically
+// equivalent to parent. The returned [Context.Done] channel is closed when
+// the deadline expires, when the returned cancel function is called,
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
+ return context.WithDeadline(parent, d)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return context.WithTimeout(parent, timeout)
+}
+
+// WithValue returns a derived context that points to the parent Context.
+// In the derived context, the value associated with key is val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The provided key must be comparable and should not be of type
+// string or any other built-in type to avoid collisions between
+// packages using context. Users of WithValue should define their own
+// types for keys. To avoid allocating when assigning to an
+// interface{}, context keys often have concrete type
+// struct{}. Alternatively, exported context key variables' static
+// type should be a pointer or interface.
+func WithValue(parent Context, key, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
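+
+// A minimal usage sketch of the key pattern described above (editor's note,
+// not part of the upstream file); requestIDKey and the value are hypothetical:
+//
+// type requestIDKey struct{}
+//
+// ctx := WithValue(Background(), requestIDKey{}, "req-42")
+// if id, ok := ctx.Value(requestIDKey{}).(string); ok {
+// fmt.Println(id) // prints "req-42"
+// }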
diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 000000000..1e64157f3
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,139 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// DialError is an error that occurs while dialling a websocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for a client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ return config.DialContext(context.Background())
+}
+
+// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation.
+func (config *Config) DialContext(ctx context.Context) (*Conn, error) {
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+
+ dialer := config.Dialer
+ if dialer == nil {
+ dialer = &net.Dialer{}
+ }
+
+ client, err := dialWithDialer(ctx, dialer, config)
+ if err != nil {
+ return nil, &DialError{config, err}
+ }
+
+ // Cleanup the connection if we fail to create the websocket successfully
+ success := false
+ defer func() {
+ if !success {
+ _ = client.Close()
+ }
+ }()
+
+ var ws *Conn
+ var wsErr error
+ doneConnecting := make(chan struct{})
+ go func() {
+ defer close(doneConnecting)
+ ws, err = NewClient(config, client)
+ if err != nil {
+ wsErr = &DialError{config, err}
+ }
+ }()
+
+ // The websocket.NewClient() function can block indefinitely; make sure that
+ // we respect the deadlines specified by the context.
+ select {
+ case <-ctx.Done():
+ // Force the pending operations to fail, terminating the pending connection attempt
+ _ = client.SetDeadline(time.Now())
+ <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish
+ return nil, &DialError{config, ctx.Err()}
+ case <-doneConnecting:
+ if wsErr == nil {
+ success = true // Disarm the deferred connection cleanup
+ }
+ return ws, wsErr
+ }
+}
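+
+// Usage sketch (editor's note, not part of the upstream file): dialing with a
+// deadline through DialContext. The URL and origin are hypothetical.
+//
+// config, err := NewConfig("ws://localhost:8080/echo", "http://localhost/")
+// if err != nil {
+// log.Fatal(err)
+// }
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// ws, err := config.DialContext(ctx) // returns a DialError wrapping ctx.Err() if the deadline passes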
diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go
new file mode 100644
index 000000000..8a2d83c47
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/dial.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+)
+
+func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
+ switch config.Location.Scheme {
+ case "ws":
+ conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
+
+ case "wss":
+ tlsDialer := &tls.Dialer{
+ NetDialer: dialer,
+ Config: config.TlsConfig,
+ }
+
+ conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
+ default:
+ err = ErrBadScheme
+ }
+ return
+}
diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 000000000..dda743466
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,582 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements the protocol of the hybi draft:
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in the hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for a hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates a new frame reader based on the frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection and creates a new reader for the frame.
+// See Section 5.2, Base Framing Protocol, for details.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
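+
+// Worked example (editor's note; byte values are from RFC 6455, Section 5.7):
+// a masked client-to-server text frame carrying "Hello" arrives as
+//
+// 0x81 0x85 0x37 0xfa 0x21 0x3d 0x7f 0x9f 0x4d 0x51 0x58
+//
+// The first byte, 0x81, yields Fin=true and OpCode=1 (TextFrame). The second
+// byte, 0x85, sets the mask bit with a 7-bit payload length of 5, so no
+// extended length fields follow. The next four bytes (0x37 0xfa 0x21 0x3d)
+// are the masking key, and XORing the remaining five payload bytes with it
+// recovers "Hello".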
+
+// A hybiFrameWriter is a writer for a hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+ // The server MUST NOT mask any frames.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(io.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(io.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking the hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
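+
+// Worked example (editor's note; values are from the opening-handshake example
+// in RFC 6455, Section 1.3):
+//
+// accept, _ := getNonceAccept([]byte("dGhlIHNhbXBsZSBub25jZQ=="))
+// // string(accept) == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="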
+
+// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A hybiServerHandshaker performs a server handshake using the hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+ // You need to choose a Protocol in the Server's Handshake func.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 000000000..0895dea19
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+ var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+ code, err := hs.ReadHandshake(buf.Reader, req)
+ if err == ErrBadWebSocketVersion {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if handshake != nil {
+ err = handshake(config, req)
+ if err != nil {
+ code = http.StatusForbidden
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ }
+ err = hs.AcceptHandshake(buf.Writer)
+ if err != nil {
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ conn = hs.NewServerConn(buf, rwc, req)
+ return
+}
+
+// Server represents a WebSocket server.
+type Server struct {
+ // Config is a WebSocket configuration for new WebSocket connection.
+ Config
+
+ // Handshake is an optional function run during the WebSocket handshake.
+ // For example, you can use it to check (or deliberately skip checking) the
+ // Origin header, or to select config.Protocol.
+ Handshake func(*Config, *http.Request) error
+
+ // Handler handles a WebSocket connection.
+ Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket connection.
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+ rwc, buf, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ panic("Hijack failed: " + err.Error())
+ }
+ // The server should abort the WebSocket connection if it finds
+ // the client did not send a handshake that matches the protocol
+ // specification.
+ defer rwc.Close()
+ conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+ if err != nil {
+ return
+ }
+ if conn == nil {
+ panic("unexpected nil conn")
+ }
+ s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// By default it checks that the Origin header is a valid URL.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket connection.
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s := Server{Handler: h, Handshake: checkOrigin}
+ s.serveWebSocket(w, req)
+}
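+
+// Usage sketch (editor's note, not part of the upstream file): registering an
+// echo endpoint with Handler. The path and listen address are hypothetical.
+//
+// http.Handle("/echo", websocket.Handler(func(ws *websocket.Conn) {
+// io.Copy(ws, ws) // echo every frame back to the client
+// }))
+// log.Fatal(http.ListenAndServe(":8080", nil))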
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 000000000..3448d2039
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,449 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in alternative,
+// more actively maintained WebSocket packages:
+//
+// - [github.com/gorilla/websocket]
+// - [github.com/coder/websocket]
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+const (
+ ProtocolVersionHybi13 = 13
+ ProtocolVersionHybi = ProtocolVersionHybi13
+ SupportedProtocolVersion = "13"
+
+ ContinuationFrame = 0
+ TextFrame = 1
+ BinaryFrame = 2
+ CloseFrame = 8
+ PingFrame = 9
+ PongFrame = 10
+ UnknownFrame = 255
+
+ DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
+ ErrBadScheme = &ProtocolError{"bad scheme"}
+ ErrBadStatus = &ProtocolError{"bad status"}
+ ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
+ ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
+ ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+ ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+ ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
+ ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
+ ErrBadFrame = &ProtocolError{"bad frame"}
+ ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
+ ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
+ ErrBadRequestMethod = &ProtocolError{"bad method"}
+ ErrNotSupported = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if the payload size
+// exceeds the limit set by Conn.MaxPayloadBytes.
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration.
+type Config struct {
+ // A WebSocket server address.
+ Location *url.URL
+
+ // A Websocket client origin.
+ Origin *url.URL
+
+ // WebSocket subprotocols.
+ Protocol []string
+
+ // WebSocket protocol version.
+ Version int
+
+ // TLS config for secure WebSocket (wss).
+ TlsConfig *tls.Config
+
+ // Additional header fields to be sent in WebSocket opening handshake.
+ Header http.Header
+
+ // Dialer used when opening websocket connections.
+ Dialer *net.Dialer
+
+ handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle the WebSocket server-side handshake.
+type serverHandshaker interface {
+ // ReadHandshake reads handshake request message from client.
+ // Returns http response code and error if any.
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+ // AcceptHandshake accepts the client handshake request and sends
+ // handshake response back to client.
+ AcceptHandshake(buf *bufio.Writer) (err error)
+
+ // NewServerConn creates a new WebSocket connection.
+ NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+ // Reader reads the payload of the frame.
+ io.Reader
+
+ // PayloadType returns the payload type.
+ PayloadType() byte
+
+ // HeaderReader returns a reader to read the header of the frame.
+ HeaderReader() io.Reader
+
+ // TrailerReader returns a reader to read the trailer of the frame.
+ // If it returns nil, there is no trailer in the frame.
+ TrailerReader() io.Reader
+
+ // Len returns the total length of the frame, including header and trailer.
+ Len() int
+}
+
+// frameReaderFactory is an interface to create a new frame reader.
+type frameReaderFactory interface {
+ NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+ // Writer writes the payload of the frame.
+ io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create a new frame writer.
+type frameWriterFactory interface {
+ NewFrameWriter(payloadType byte) (w frameWriter, err error)
+}
+
+type frameHandler interface {
+ HandleFrame(frame frameReader) (r frameReader, err error)
+ WriteClose(status int) (err error)
+}
+
+// Conn represents a WebSocket connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ config *Config
+ request *http.Request
+
+ buf *bufio.ReadWriter
+ rwc io.ReadWriteCloser
+
+ rio sync.Mutex
+ frameReaderFactory
+ frameReader
+
+ wio sync.Mutex
+ frameWriterFactory
+
+ frameHandler
+ PayloadType byte
+ defaultCloseStatus int
+
+ // MaxPayloadBytes limits the size of frame payload received over Conn
+ // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
+ MaxPayloadBytes int
+}
+
+// Read implements the io.Reader interface:
+// it reads data of a frame from the WebSocket connection.
+// If msg is not large enough for the frame data, it fills msg and the next
+// Read will read the rest of the frame data.
+// It reads Text and Binary frames.
+func (ws *Conn) Read(msg []byte) (n int, err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+again:
+ if ws.frameReader == nil {
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return 0, err
+ }
+ ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return 0, err
+ }
+ if ws.frameReader == nil {
+ goto again
+ }
+ }
+ n, err = ws.frameReader.Read(msg)
+ if err == io.EOF {
+ if trailer := ws.frameReader.TrailerReader(); trailer != nil {
+ io.Copy(io.Discard, trailer)
+ }
+ ws.frameReader = nil
+ goto again
+ }
+ return n, err
+}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// Close implements the io.Closer interface.
+func (ws *Conn) Close() error {
+ err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
+ err1 := ws.rwc.Close()
+ if err != nil {
+ return err
+ }
+ return err1
+}
+
+// IsClientConn reports whether ws is a client-side connection.
+func (ws *Conn) IsClientConn() bool { return ws.request == nil }
+
+// IsServerConn reports whether ws is a server-side connection.
+func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
+// LocalAddr returns the WebSocket Origin for the connection for client, or
+// the WebSocket location for server.
+func (ws *Conn) LocalAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Origin}
+ }
+ return &Addr{ws.config.Location}
+}
+
+// RemoteAddr returns the WebSocket location for the connection for client, or
+// the Websocket Origin for server.
+func (ws *Conn) RemoteAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Location}
+ }
+ return &Addr{ws.config.Origin}
+}
+
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
+
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetReadDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetWriteDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// Config returns the WebSocket config.
+func (ws *Conn) Config() *Config { return ws.config }
+
+// Request returns the http request upgraded to the WebSocket.
+// It is nil for client side.
+func (ws *Conn) Request() *http.Request { return ws.request }
+
+// Codec represents a symmetric pair of functions that implement a codec.
+type Codec struct {
+ Marshal func(v interface{}) (data []byte, payloadType byte, err error)
+ Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
+}
+
+// Send sends v marshaled by cd.Marshal as a single frame to ws.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ w.Close()
+ return err
+}
+
+// Receive receives a single frame from ws, unmarshals it with cd.Unmarshal,
+// and stores the result in v. The whole frame payload is read into an
+// in-memory buffer; the maximum payload size is defined by ws.MaxPayloadBytes.
+// If the frame payload size exceeds the limit, ErrFrameTooLarge is returned;
+// in this case the frame is not read off the wire completely. The next call to
+// Receive reads and discards the leftover data of the oversized frame first.
+func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+ if ws.frameReader != nil {
+ _, err = io.Copy(io.Discard, ws.frameReader)
+ if err != nil {
+ return err
+ }
+ ws.frameReader = nil
+ }
+again:
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return err
+ }
+ frame, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return err
+ }
+ if frame == nil {
+ goto again
+ }
+ maxPayloadBytes := ws.MaxPayloadBytes
+ if maxPayloadBytes == 0 {
+ maxPayloadBytes = DefaultMaxPayloadBytes
+ }
+ if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
+ // payload size exceeds limit, no need to call Unmarshal
+ //
+ // set frameReader to current oversized frame so that
+ // the next call to this function can drain leftover
+ // data before processing the next frame
+ ws.frameReader = frame
+ return ErrFrameTooLarge
+ }
+ payloadType := frame.PayloadType()
+ data, err := io.ReadAll(frame)
+ if err != nil {
+ return err
+ }
+ return cd.Unmarshal(data, payloadType, v)
+}
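+
+// Usage sketch (editor's note, not part of the upstream file): a receive loop
+// that tolerates oversized frames. handleMsg is a hypothetical callback.
+//
+// ws.MaxPayloadBytes = 1 << 20 // 1MB instead of the 32MB default
+// for {
+// var msg string
+// err := Message.Receive(ws, &msg)
+// if err == ErrFrameTooLarge {
+// continue // the oversized frame is drained by the next Receive
+// }
+// if err != nil {
+// break
+// }
+// handleMsg(msg)
+// }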
+
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
+/*
+Message is a codec to send/receive text/binary data in a frame on a WebSocket connection.
+To send/receive text frame, use string type.
+To send/receive binary frame, use []byte type.
+
+Trivial usage:
+
+ import "websocket"
+
+ // receive text frame
+ var message string
+ websocket.Message.Receive(ws, &message)
+
+ // send text frame
+ message = "hello"
+ websocket.Message.Send(ws, message)
+
+ // receive binary frame
+ var data []byte
+ websocket.Message.Receive(ws, &data)
+
+ // send binary frame
+ data = []byte{0, 1, 2}
+ websocket.Message.Send(ws, data)
+*/
+var Message = Codec{marshal, unmarshal}
+
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
+func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ return json.Unmarshal(msg, v)
+}
+
+/*
+JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
+
+Trivial usage:
+
+ import "websocket"
+
+ type T struct {
+ Msg string
+ Count int
+ }
+
+ // receive JSON type T
+ var data T
+ websocket.JSON.Receive(ws, &data)
+
+ // send JSON type T
+ websocket.JSON.Send(ws, data)
+*/
+var JSON = Codec{jsonMarshal, jsonUnmarshal}
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
new file mode 100644
index 000000000..2a7cf70da
--- /dev/null
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/sync/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..405183098
--- /dev/null
+++ b/vendor/golang.org/x/sync/singleflight/singleflight.go
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package singleflight provides a duplicate function call suppression
+// mechanism.
+package singleflight // import "golang.org/x/sync/singleflight"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "sync"
+)
+
+// errGoexit indicates that runtime.Goexit was called in
+// the user-given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace captured during the execution of the given function.
+type panicError struct {
+ value interface{}
+ stack []byte
+}
+
+// Error implements the error interface.
+func (p *panicError) Error() string {
+ return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func (p *panicError) Unwrap() error {
+ err, ok := p.value.(error)
+ if !ok {
+ return nil
+ }
+
+ return err
+}
+
+func newPanicError(v interface{}) error {
+ stack := debug.Stack()
+
+ // The first line of the stack trace is of the form "goroutine N [status]:"
+ // but by the time the panic reaches Do the goroutine may no longer exist
+ // and its status will have changed. Trim out the misleading line.
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+ stack = stack[line+1:]
+ }
+ return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+
+ if e, ok := c.err.(*panicError); ok {
+ panic(e)
+ } else if c.err == errGoexit {
+ runtime.Goexit()
+ }
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
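+
+// Usage sketch (editor's note, not part of the upstream file): concurrent
+// callers asking for the same key share a single execution. fetchUser is a
+// hypothetical expensive lookup.
+//
+// var g Group
+// v, err, shared := g.Do("user:42", func() (interface{}, error) {
+// return fetchUser(42)
+// })
+// // shared reports whether v was also handed to other concurrent callers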
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ normalReturn := false
+ recovered := false
+
+ // Use a double defer to distinguish a panic from runtime.Goexit;
+ // for more details see https://golang.org/cl/134395.
+ defer func() {
+ // the given function invoked runtime.Goexit
+ if !normalReturn && !recovered {
+ c.err = errGoexit
+ }
+
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ c.wg.Done()
+ if g.m[key] == c {
+ delete(g.m, key)
+ }
+
+ if e, ok := c.err.(*panicError); ok {
+ // In order to prevent the waiting channels from being blocked forever,
+ // we need to ensure that this panic cannot be recovered.
+ if len(c.chans) > 0 {
+ go panic(e)
+ select {} // Keep this goroutine around so that it will appear in the crash dump.
+ } else {
+ panic(e)
+ }
+ } else if c.err == errGoexit {
+ // Already in the process of goexit, no need to call again
+ } else {
+ // Normal return
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ }
+ }()
+
+ func() {
+ defer func() {
+ if !normalReturn {
+ // Ideally, we would wait to take a stack trace until we've determined
+ // whether this is a panic or a runtime.Goexit.
+ //
+ // Unfortunately, the only way we can distinguish the two is to see
+ // whether the recover stopped the goroutine from terminating, and by
+ // the time we know that, the part of the stack trace relevant to the
+ // panic has been discarded.
+ if r := recover(); r != nil {
+ c.err = newPanicError(r)
+ }
+ }
+ }()
+
+ c.val, c.err = fn()
+ normalReturn = true
+ }()
+
+ if !normalReturn {
+ recovered = true
+ }
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ delete(g.m, key)
+ g.mu.Unlock()
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 000000000..39aeeb644
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,214 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer k.Close()
+//
+// s, _, err := k.GetStringValue("SystemRoot")
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("Windows system root is %q\n", s)
+package registry
+
+import (
+ "io"
+ "runtime"
+ "syscall"
+ "time"
+)
+
+const (
+ // Registry key security and access rights.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+ // for details.
+ ALL_ACCESS = 0xf003f
+ CREATE_LINK = 0x00020
+ CREATE_SUB_KEY = 0x00004
+ ENUMERATE_SUB_KEYS = 0x00008
+ EXECUTE = 0x20019
+ NOTIFY = 0x00010
+ QUERY_VALUE = 0x00001
+ READ = 0x20019
+ SET_VALUE = 0x00002
+ WOW64_32KEY = 0x00200
+ WOW64_64KEY = 0x00100
+ WRITE = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+ // Windows defines some predefined root keys that are always open.
+ // An application can use these keys as entry points to the registry.
+ // Normally these keys are used in OpenKey to open new keys,
+ // but they can also be used anywhere a Key is required.
+ CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT)
+ CURRENT_USER = Key(syscall.HKEY_CURRENT_USER)
+ LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE)
+ USERS = Key(syscall.HKEY_USERS)
+ CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG)
+ PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+ return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+ p, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+ var subkey syscall.Handle
+ err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns the local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+ var err error
+ var p *uint16
+ if pcname != "" {
+ p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+ if err != nil {
+ return 0, err
+ }
+ }
+ var remoteKey syscall.Handle
+ err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+ // RegEnumKeyEx must be called repeatedly and to completion.
+ // During this time, this goroutine cannot migrate away from
+ // its current thread. See https://golang.org/issue/49320 and
+ // https://golang.org/issue/49466.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ names := make([]string, 0)
+ // The registry key name size limit is 255 characters, as described here:
+ // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+ buf := make([]uint16, 256) // plus extra room for the terminating zero byte
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+ var h syscall.Handle
+ var d uint32
+ var pathPointer *uint16
+ pathPointer, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, false, err
+ }
+ err = regCreateKeyEx(syscall.Handle(k), pathPointer,
+ 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+ if err != nil {
+ return 0, false, err
+ }
+ return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
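+
+// Usage sketch (editor's note, not part of the upstream file): creating a key,
+// checking whether it already existed, and deleting it again. The subkey path
+// is hypothetical.
+//
+// k, existed, err := CreateKey(CURRENT_USER, `Software\ExampleApp`, ALL_ACCESS)
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer k.Close()
+// _ = existed // true if the key was already present
+// _ = DeleteKey(CURRENT_USER, `Software\ExampleApp`)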
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+ pathPointer, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return regDeleteKey(syscall.Handle(k), pathPointer)
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+ SubKeyCount uint32
+ MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+ ValueCount uint32
+ MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+ MaxValueLen uint32 // longest data component among the key's values, in bytes
+ lastWriteTime syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+ return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+ var ki KeyInfo
+ err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+ &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+ &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+ if err != nil {
+ return nil, err
+ }
+ return &ki, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 000000000..bbf86ccf0
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 000000000..f533091c1
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+ _REG_OPTION_NON_VOLATILE = 0
+
+ _REG_CREATED_NEW_KEY = 1
+ _REG_OPENED_EXISTING_KEY = 2
+
+ _ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+ return procRegLoadMUIStringW.Find()
+}
+
+//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
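Each //sys directive above is input to the mkwinsyscall generator wired up in mksyscall.go: running go generate turns every directive into a stub in zsyscall_windows.go that lazily resolves the named DLL export and converts a non-zero return code into a syscall.Errno. For example, the regDeleteKey directive expands into the following stub, reproduced from the generated file later in this diff:

    func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
        r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
        if r0 != 0 {
            regerrno = syscall.Errno(r0)
        }
        return
    }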
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 000000000..a1bcbb236
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,390 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+ "errors"
+ "io"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ // Registry value types.
+ NONE = 0
+ SZ = 1
+ EXPAND_SZ = 2
+ BINARY = 3
+ DWORD = 4
+ DWORD_BIG_ENDIAN = 5
+ LINK = 6
+ MULTI_SZ = 7
+ RESOURCE_LIST = 8
+ FULL_RESOURCE_DESCRIPTOR = 9
+ RESOURCE_REQUIREMENTS_LIST = 10
+ QWORD = 11
+)
+
+var (
+ // ErrShortBuffer is returned when the buffer was too short for the operation.
+ ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+ // ErrNotExist is returned when a registry key or value does not exist.
+ ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+ // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+ ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value, it returns
+// an ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n without retrieving the data.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return 0, 0, err
+ }
+ var pbuf *byte
+ if len(buf) > 0 {
+ pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+ }
+ l := uint32(len(buf))
+ err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+ if err != nil {
+ return int(l), valtype, err
+ }
+ return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ var t uint32
+ n := uint32(len(buf))
+ for {
+ err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+ if err == nil {
+ return buf[:n], t, nil
+ }
+ if err != syscall.ERROR_MORE_DATA {
+ return nil, 0, err
+ }
+ if n <= uint32(len(buf)) {
+ return nil, 0, err
+ }
+ buf = make([]byte, n)
+ }
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return "", typ, err2
+ }
+ switch typ {
+ case SZ, EXPAND_SZ:
+ default:
+ return "", typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return "", typ, nil
+ }
+ u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return "", err
+ }
+
+ buf := make([]uint16, 1024)
+ var buflen uint32
+ var pdir *uint16
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+ // Try to resolve the string value using the system directory as
+ // a DLL search path; this assumes the string value is of the form
+ // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+ // This approach works with tzres.dll but may have to be revised
+ // in the future to allow callers to provide custom search paths.
+
+ var s string
+ s, err = ExpandString("%SystemRoot%\\system32\\")
+ if err != nil {
+ return "", err
+ }
+ pdir, err = syscall.UTF16PtrFromString(s)
+ if err != nil {
+ return "", err
+ }
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+ if buflen <= uint32(len(buf)) {
+ break // Buffer is not growing; assume a race and stop retrying.
+ }
+ buf = make([]uint16, buflen)
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+ if value == "" {
+ return "", nil
+ }
+ p, err := syscall.UTF16PtrFromString(value)
+ if err != nil {
+ return "", err
+ }
+ r := make([]uint16, 100)
+ for {
+ n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+ if err != nil {
+ return "", err
+ }
+ if n <= uint32(len(r)) {
+ return syscall.UTF16ToString(r[:n]), nil
+ }
+ r = make([]uint16, n)
+ }
+}
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != MULTI_SZ {
+ return nil, typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return nil, typ, nil
+ }
+ p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ if len(p) == 0 {
+ return nil, typ, nil
+ }
+ if p[len(p)-1] == 0 {
+ p = p[:len(p)-1] // remove terminating null
+ }
+ val = make([]string, 0, 5)
+ from := 0
+ for i, c := range p {
+ if c == 0 {
+ val = append(val, string(utf16.Decode(p[from:i])))
+ from = i + 1
+ }
+ }
+ return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 8))
+ if err2 != nil {
+ return 0, typ, err2
+ }
+ switch typ {
+ case DWORD:
+ if len(data) != 4 {
+ return 0, typ, errors.New("DWORD value is not 4 bytes long")
+ }
+ var val32 uint32
+ copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+ return uint64(val32), DWORD, nil
+ case QWORD:
+ if len(data) != 8 {
+ return 0, typ, errors.New("QWORD value is not 8 bytes long")
+ }
+ copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+ return val, QWORD, nil
+ default:
+ return 0, typ, ErrUnexpectedType
+ }
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != BINARY {
+ return nil, typ, ErrUnexpectedType
+ }
+ return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+ }
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+ return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+ return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+ v, err := syscall.UTF16FromString(value)
+ if err != nil {
+ return err
+ }
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+ return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+ return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+ ss := ""
+ for _, s := range value {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return errors.New("string cannot have 0 inside")
+ }
+ }
+ ss += s + "\x00"
+ }
+ v := utf16.Encode([]rune(ss + "\x00"))
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, MULTI_SZ, buf)
+}
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+ return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+ namePointer, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ return regDeleteValue(syscall.Handle(k), namePointer)
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+ ki, err := k.Stat()
+ if err != nil {
+ return nil, err
+ }
+ names := make([]string, 0, ki.ValueCount)
+ buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
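A short sketch of the typed get/set API defined in this file, shown for illustration only. CURRENT_USER, ALL_ACCESS, and Close come from parts of the vendored package outside this hunk; CreateKey appears earlier in this diff.

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/windows/registry"
    )

    func main() {
        k, _, err := registry.CreateKey(registry.CURRENT_USER, `Software\Example`, registry.ALL_ACCESS)
        if err != nil {
            log.Fatal(err)
        }
        defer k.Close()

        if err := k.SetStringValue("greeting", "hello"); err != nil {
            log.Fatal(err)
        }
        if err := k.SetDWordValue("count", 42); err != nil {
            log.Fatal(err)
        }

        s, _, err := k.GetStringValue("greeting") // stored as SZ
        if err != nil {
            log.Fatal(err)
        }
        n, _, err := k.GetIntegerValue("count") // stored as DWORD
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(s, n) // hello 42
    }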
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 000000000..fc1835d8a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,117 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (Perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW")
+ procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
+ procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW")
+ procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW")
+ procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
+ procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW")
+ procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW")
+ procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
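A side note on errnoErr above: converting a syscall.Errno to the error interface can allocate, so the generated code pre-boxes the most common codes in package variables and hands back those shared values. A minimal sketch of the same pattern in isolation (the error code here is chosen for illustration):

    package main

    import "syscall"

    // Box the hot error value once; every failing call then returns this
    // shared error instead of re-boxing the Errno.
    var errAccessDenied error = syscall.Errno(5) // ERROR_ACCESS_DENIED

    func toError(e syscall.Errno) error {
        if e == 5 {
            return errAccessDenied
        }
        return e // uncommon codes are boxed per call
    }

    func main() { _ = toError(5) }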
diff --git a/vendor/golang.org/x/text/feature/plural/common.go b/vendor/golang.org/x/text/feature/plural/common.go
new file mode 100644
index 000000000..fdcb373fd
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/common.go
@@ -0,0 +1,70 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package plural
+
+// Form defines a plural form.
+//
+// Not all languages support all forms. Also, the meaning of each form varies
+// per language. It is important to note that the name of a form does not
+// necessarily correspond one-to-one with the set of numbers. For instance,
+// for Croatian, One matches not only 1, but also 11, 21, etc.
+//
+// Each language must at least support the form "other".
+type Form byte
+
+const (
+ Other Form = iota
+ Zero
+ One
+ Two
+ Few
+ Many
+)
+
+var countMap = map[string]Form{
+ "other": Other,
+ "zero": Zero,
+ "one": One,
+ "two": Two,
+ "few": Few,
+ "many": Many,
+}
+
+type pluralCheck struct {
+ // category:
+ // 3..7: opID
+ // 0..2: category
+ cat byte
+ setID byte
+}
+
+// opID identifies the type of operand in the plural rule, being i, n or f.
+// (v, w, and t are treated as filters in our implementation.)
+type opID byte
+
+const (
+ opMod opID = 0x1 // is '%' used?
+ opNotEqual opID = 0x2 // using "!=" to compare
+ opI opID = 0 << 2 // integers after taking the absolute value
+ opN opID = 1 << 2 // full number (must be integer)
+ opF opID = 2 << 2 // fraction
+ opV opID = 3 << 2 // number of visible digits
+ opW opID = 4 << 2 // number of visible digits without trailing zeros
+ opBretonM opID = 5 << 2 // hard-wired rule for Breton
+ opItalian800 opID = 6 << 2 // hard-wired rule for Italian
+ opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan
+)
+const (
+ // Use this plural form to indicate the next rule needs to match as well.
+ // The last condition in the list will have the correct plural form.
+ andNext = 0x7
+ formMask = 0x7
+
+ opShift = 3
+
+ // numN indicates the maximum integer, or maximum mod value, for which we
+ // have inclusion masks.
+ numN = 100
+ // The common denominator of the modulo that is taken.
+ maxMod = 100
+)
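To make the bit packing above concrete: the low three bits of pluralCheck.cat carry the plural Form (or the andNext marker) and the high bits carry the opID, exactly as matchPlural in plural.go later in this diff unpacks them. A small sketch:

    // unpack splits a pluralCheck.cat byte into its operation and form.
    func unpack(c pluralCheck) (op opID, form Form) {
        op = opID(c.cat >> opShift)   // high bits: which operand/operation
        form = Form(c.cat & formMask) // low 3 bits: resulting plural form
        return op, form
    }

For example, an entry with cat 0x22 unpacks to op opN (an "n = x" check) and form One.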
diff --git a/vendor/golang.org/x/text/feature/plural/message.go b/vendor/golang.org/x/text/feature/plural/message.go
new file mode 100644
index 000000000..56d518cc3
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/message.go
@@ -0,0 +1,244 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plural
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// TODO: consider deleting this interface. Maybe VisibleDigits is always
+// sufficient and practical.
+
+// Interface is used for types that can determine their own plural form.
+type Interface interface {
+ // PluralForm reports the plural form for the given language of the
+ // underlying value. It also returns the integer value. If the integer value
+ // is larger than fits in n, PluralForm may return a value modulo
+ // 10,000,000.
+ PluralForm(t language.Tag, scale int) (f Form, n int)
+}
+
+// Selectf returns the first case for which its selector is a match for the
+// arg-th substitution argument to a formatting call, formatting it as indicated
+// by format.
+//
+// The cases argument consists of pairs of selectors and messages. Selectors are of type
+// string or Form. Messages are of type string or catalog.Message. A selector
+// matches an argument if:
+// - it is "other" or Other
+// - it matches the plural form of the argument: "zero", "one", "two", "few",
+// or "many", or the equivalent Form
+// - it is of the form "=x" where x is an integer that matches the value of
+// the argument.
+// - it is of the form "<x" where x is an integer that is larger than the
+// argument.
+ if m.kind > kindDefault {
+ e.EncodeUint(uint64(m.scale))
+ }
+
+ forms := validForms(cardinal, e.Language())
+
+ for i := 0; i < len(m.cases); {
+ if err := compileSelector(e, forms, m.cases[i]); err != nil {
+ return err
+ }
+ if i++; i >= len(m.cases) {
+ return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1])
+ }
+ var msg catalog.Message
+ switch x := m.cases[i].(type) {
+ case string:
+ msg = catalog.String(x)
+ case catalog.Message:
+ msg = x
+ default:
+ return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x)
+ }
+ if err := e.EncodeMessage(msg); err != nil {
+ return err
+ }
+ i++
+ }
+ return nil
+}
+
+func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error {
+ form := Other
+ switch x := selector.(type) {
+ case string:
+ if x == "" {
+ return fmt.Errorf("plural: empty selector")
+ }
+ if c := x[0]; c == '=' || c == '<' {
+ val, err := strconv.ParseUint(x[1:], 10, 16)
+ if err != nil {
+ return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err)
+ }
+ e.EncodeUint(uint64(c))
+ e.EncodeUint(val)
+ return nil
+ }
+ var ok bool
+ form, ok = countMap[x]
+ if !ok {
+ return fmt.Errorf("plural: invalid plural form %q", selector)
+ }
+ case Form:
+ form = x
+ default:
+ return fmt.Errorf("plural: selector of type %T; want string or Form", selector)
+ }
+
+ ok := false
+ for _, f := range valid {
+ if f == form {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language())
+ }
+ e.EncodeUint(uint64(form))
+ return nil
+}
+
+func execute(d *catmsg.Decoder) bool {
+ lang := d.Language()
+ argN := int(d.DecodeUint())
+ kind := int(d.DecodeUint())
+ scale := -1 // default
+ if kind > kindDefault {
+ scale = int(d.DecodeUint())
+ }
+ form := Other
+ n := -1
+ if arg := d.Arg(argN); arg == nil {
+ // Default to Other.
+ } else if x, ok := arg.(number.VisibleDigits); ok {
+ d := x.Digits(nil, lang, scale)
+ form, n = cardinal.matchDisplayDigits(lang, &d)
+ } else if x, ok := arg.(Interface); ok {
+ // This covers lists and formatters from the number package.
+ form, n = x.PluralForm(lang, scale)
+ } else {
+ var f number.Formatter
+ switch kind {
+ case kindScale:
+ f.InitDecimal(lang)
+ f.SetScale(scale)
+ case kindScientific:
+ f.InitScientific(lang)
+ f.SetScale(scale)
+ case kindPrecision:
+ f.InitDecimal(lang)
+ f.SetPrecision(scale)
+ case kindDefault:
+ // sensible default
+ f.InitDecimal(lang)
+ if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr {
+ f.SetScale(0)
+ } else {
+ f.SetScale(2)
+ }
+ }
+ var dec number.Decimal // TODO: buffer in Printer
+ dec.Convert(f.RoundingContext, arg)
+ v := number.FormatDigits(&dec, f.RoundingContext)
+ if !v.NaN && !v.Inf {
+ form, n = cardinal.matchDisplayDigits(d.Language(), &v)
+ }
+ }
+ for !d.Done() {
+ f := d.DecodeUint()
+ if (f == '=' && n == int(d.DecodeUint())) ||
+ (f == '<' && 0 <= n && n < int(d.DecodeUint())) ||
+ form == Form(f) ||
+ Other == Form(f) {
+ return d.ExecuteMessage()
+ }
+ d.SkipMessage()
+ }
+ return false
+}
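The Compile/execute pair above is the machinery behind this package's public Selectf constructor (not visible in this hunk). As a hedged sketch of typical use through x/text/message, assuming the standard Selectf(arg int, format string, cases ...interface{}) signature:

    package main

    import (
        "golang.org/x/text/feature/plural"
        "golang.org/x/text/language"
        "golang.org/x/text/message"
    )

    func main() {
        // "=0" and plural.One are both valid selectors, per compileSelector.
        message.Set(language.English, "%d file(s) remaining",
            plural.Selectf(1, "%d",
                "=0", "no files remaining",
                plural.One, "one file remaining",
                plural.Other, "%d files remaining"))

        p := message.NewPrinter(language.English)
        p.Printf("%d file(s) remaining", 2) // prints "2 files remaining"
    }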
diff --git a/vendor/golang.org/x/text/feature/plural/plural.go b/vendor/golang.org/x/text/feature/plural/plural.go
new file mode 100644
index 000000000..e9f2d42e0
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/plural.go
@@ -0,0 +1,262 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go
+
+// Package plural provides utilities for handling linguistic plurals in text.
+//
+// The definitions in this package are based on the plural rule handling defined
+// in CLDR. See
+// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for
+// details.
+package plural
+
+import (
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+)
+
+// Rules defines the plural rules for all languages for a certain plural type.
+//
+// This package is UNDER CONSTRUCTION and its API may change.
+type Rules struct {
+ rules []pluralCheck
+ index []byte
+ langToIndex []byte
+ inclusionMasks []uint64
+}
+
+var (
+ // Cardinal defines the plural rules for numbers indicating quantities.
+ Cardinal *Rules = cardinal
+
+ // Ordinal defines the plural rules for numbers indicating position
+ // (first, second, etc.).
+ Ordinal *Rules = ordinal
+
+ ordinal = &Rules{
+ ordinalRules,
+ ordinalIndex,
+ ordinalLangToIndex,
+ ordinalInclusionMasks[:],
+ }
+
+ cardinal = &Rules{
+ cardinalRules,
+ cardinalIndex,
+ cardinalLangToIndex,
+ cardinalInclusionMasks[:],
+ }
+)
+
+// getIntApprox converts the digits in slice digits[start:end] to an integer
+// according to the following rules:
+// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed
+// to be zero.
+// - Result n is big if i / 10^nMod > 1.
+// - Otherwise the result is i % 10^nMod.
+//
+// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result
+// for various values of nMod is:
+// - when nMod == 2, n == big
+// - when nMod == 3, n == big
+// - when nMod == 4, n == big
+// - when nMod == 5, n == 12300
+// - when nMod == 6, n == 12300
+// - when nMod == 7, n == 12300
+func getIntApprox(digits []byte, start, end, nMod, big int) (n int) {
+ // Leading 0 digits just result in 0.
+ p := start
+ if p < 0 {
+ p = 0
+ }
+ // Range only over the part for which we have digits.
+ mid := end
+ if mid >= len(digits) {
+ mid = len(digits)
+ }
+ // Check digits more significant than nMod.
+ if q := end - nMod; q > 0 {
+ if q > mid {
+ q = mid
+ }
+ for ; p < q; p++ {
+ if digits[p] != 0 {
+ return big
+ }
+ }
+ }
+ for ; p < mid; p++ {
+ n = 10*n + int(digits[p])
+ }
+ // Multiply for trailing zeros.
+ for ; p < end; p++ {
+ n *= 10
+ }
+ return n
+}
+
+// MatchDigits computes the plural form for the given language and the given
+// decimal floating point digits. The digits are stored in big-endian order and
+// are of value byte(0) - byte(9). The floating point position is indicated by
+// exp and the number of visible decimals is scale. All leading and trailing
+// zeros may be omitted from digits.
+//
+// The following table contains examples of possible arguments to represent
+// the given numbers.
+//
+// decimal digits exp scale
+// 123 []byte{1, 2, 3} 3 0
+// 123.4 []byte{1, 2, 3, 4} 3 1
+// 123.40 []byte{1, 2, 3, 4} 3 2
+// 100000 []byte{1} 6 0
+// 100000.00 []byte{1} 6 3
+func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form {
+ index := tagToID(t)
+
+ // Differentiate up to and including mod 1000000 for the integer part.
+ n := getIntApprox(digits, 0, exp, 6, 1000000)
+
+ // Differentiate up to and including mod 100 for the fractional part.
+ f := getIntApprox(digits, exp, exp+scale, 2, 100)
+
+ return matchPlural(p, index, n, f, scale)
+}
+
+func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) {
+ n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000)
+ return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n
+}
+
+func validForms(p *Rules, t language.Tag) (forms []Form) {
+ offset := p.langToIndex[tagToID(t)]
+ rules := p.rules[p.index[offset]:p.index[offset+1]]
+
+ forms = append(forms, Other)
+ last := Other
+ for _, r := range rules {
+ if cat := Form(r.cat & formMask); cat != andNext && last != cat {
+ forms = append(forms, cat)
+ last = cat
+ }
+ }
+ return forms
+}
+
+func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form {
+ return matchPlural(p, tagToID(t), n, f, scale)
+}
+
+// MatchPlural returns the plural form for the given language and plural
+// operands (as defined in
+// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules):
+//
+// where
+// n absolute value of the source number (integer and decimals)
+// i integer digits of n
+// v number of visible fraction digits in n, with trailing zeros
+// w number of visible fraction digits in n, without trailing zeros
+// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w))
+// t visible fractional digits in n, without trailing zeros
+//
+// If any of the operand values is too large to fit in an int, it is okay to
+// pass the value modulo 10,000,000.
+func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form {
+ return matchPlural(p, tagToID(lang), i, f, v)
+}
+
+func matchPlural(p *Rules, index compact.ID, n, f, v int) Form {
+ nMask := p.inclusionMasks[n%maxMod]
+ // Compute the fMask inline in the rules below, as it is relatively rare.
+ // fMask := p.inclusionMasks[f%maxMod]
+ vMask := p.inclusionMasks[v%maxMod]
+
+ // Do the matching
+ offset := p.langToIndex[index]
+ rules := p.rules[p.index[offset]:p.index[offset+1]]
+ for i := 0; i < len(rules); i++ {
+ rule := rules[i]
+ setBit := uint64(1 << rule.setID)
+ var skip bool
+ switch op := opID(rule.cat >> opShift); op {
+ case opI: // i = x
+ skip = n >= numN || nMask&setBit == 0
+
+ case opI | opNotEqual: // i != x
+ skip = n < numN && nMask&setBit != 0
+
+ case opI | opMod: // i % m = x
+ skip = nMask&setBit == 0
+
+ case opI | opMod | opNotEqual: // i % m != x
+ skip = nMask&setBit != 0
+
+ case opN: // n = x
+ skip = f != 0 || n >= numN || nMask&setBit == 0
+
+ case opN | opNotEqual: // n != x
+ skip = f == 0 && n < numN && nMask&setBit != 0
+
+ case opN | opMod: // n % m = x
+ skip = f != 0 || nMask&setBit == 0
+
+ case opN | opMod | opNotEqual: // n % m != x
+ skip = f == 0 && nMask&setBit != 0
+
+ case opF: // f = x
+ skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0
+
+ case opF | opNotEqual: // f != x
+ skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0
+
+ case opF | opMod: // f % m = x
+ skip = p.inclusionMasks[f%maxMod]&setBit == 0
+
+ case opF | opMod | opNotEqual: // f % m != x
+ skip = p.inclusionMasks[f%maxMod]&setBit != 0
+
+ case opV: // v = x
+ skip = v < numN && vMask&setBit == 0
+
+ case opV | opNotEqual: // v != x
+ skip = v < numN && vMask&setBit != 0
+
+ case opW: // w == 0
+ skip = f != 0
+
+ case opW | opNotEqual: // w != 0
+ skip = f == 0
+
+ // Hard-wired rules that cannot be handled by our algorithm.
+
+ case opBretonM:
+ skip = f != 0 || n == 0 || n%1000000 != 0
+
+ case opAzerbaijan00s:
+ // 100,200,300,400,500,600,700,800,900
+ skip = n == 0 || n >= 1000 || n%100 != 0
+
+ case opItalian800:
+ skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800
+ }
+ if skip {
+ // advance over AND entries.
+ for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ {
+ }
+ continue
+ }
+ // return if we have a final entry.
+ if cat := rule.cat & formMask; cat != andNext {
+ return Form(cat)
+ }
+ }
+ return Other
+}
+
+func tagToID(t language.Tag) compact.ID {
+ id, _ := compact.RegionalID(compact.Tag(t))
+ return id
+}
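A short sketch exercising the matching entry points defined above. The digit encoding follows the table in the MatchDigits comment (123.4 becomes digits {1, 2, 3, 4} with exp 3 and scale 1):

    package main

    import (
        "fmt"

        "golang.org/x/text/feature/plural"
        "golang.org/x/text/language"
    )

    func main() {
        en := language.English

        // English cardinals: 1 is One, every other integer is Other.
        fmt.Println(plural.Cardinal.MatchPlural(en, 1, 0, 0, 0, 0) == plural.One)   // true
        fmt.Println(plural.Cardinal.MatchPlural(en, 2, 0, 0, 0, 0) == plural.Other) // true

        // 123.4 has a visible fraction digit, so it is Other in English.
        fmt.Println(plural.Cardinal.MatchDigits(en, []byte{1, 2, 3, 4}, 3, 1) == plural.Other) // true
    }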
diff --git a/vendor/golang.org/x/text/feature/plural/tables.go b/vendor/golang.org/x/text/feature/plural/tables.go
new file mode 100644
index 000000000..b06b9cb4e
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/tables.go
@@ -0,0 +1,552 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package plural
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+var ordinalRules = []pluralCheck{ // 64 elements
+ 0: {cat: 0x2f, setID: 0x4},
+ 1: {cat: 0x3a, setID: 0x5},
+ 2: {cat: 0x22, setID: 0x1},
+ 3: {cat: 0x22, setID: 0x6},
+ 4: {cat: 0x22, setID: 0x7},
+ 5: {cat: 0x2f, setID: 0x8},
+ 6: {cat: 0x3c, setID: 0x9},
+ 7: {cat: 0x2f, setID: 0xa},
+ 8: {cat: 0x3c, setID: 0xb},
+ 9: {cat: 0x2c, setID: 0xc},
+ 10: {cat: 0x24, setID: 0xd},
+ 11: {cat: 0x2d, setID: 0xe},
+ 12: {cat: 0x2d, setID: 0xf},
+ 13: {cat: 0x2f, setID: 0x10},
+ 14: {cat: 0x35, setID: 0x3},
+ 15: {cat: 0xc5, setID: 0x11},
+ 16: {cat: 0x2, setID: 0x1},
+ 17: {cat: 0x5, setID: 0x3},
+ 18: {cat: 0xd, setID: 0x12},
+ 19: {cat: 0x22, setID: 0x1},
+ 20: {cat: 0x2f, setID: 0x13},
+ 21: {cat: 0x3d, setID: 0x14},
+ 22: {cat: 0x2f, setID: 0x15},
+ 23: {cat: 0x3a, setID: 0x16},
+ 24: {cat: 0x2f, setID: 0x17},
+ 25: {cat: 0x3b, setID: 0x18},
+ 26: {cat: 0x2f, setID: 0xa},
+ 27: {cat: 0x3c, setID: 0xb},
+ 28: {cat: 0x22, setID: 0x1},
+ 29: {cat: 0x23, setID: 0x19},
+ 30: {cat: 0x24, setID: 0x1a},
+ 31: {cat: 0x22, setID: 0x1b},
+ 32: {cat: 0x23, setID: 0x2},
+ 33: {cat: 0x24, setID: 0x1a},
+ 34: {cat: 0xf, setID: 0x15},
+ 35: {cat: 0x1a, setID: 0x16},
+ 36: {cat: 0xf, setID: 0x17},
+ 37: {cat: 0x1b, setID: 0x18},
+ 38: {cat: 0xf, setID: 0x1c},
+ 39: {cat: 0x1d, setID: 0x1d},
+ 40: {cat: 0xa, setID: 0x1e},
+ 41: {cat: 0xa, setID: 0x1f},
+ 42: {cat: 0xc, setID: 0x20},
+ 43: {cat: 0xe4, setID: 0x0},
+ 44: {cat: 0x5, setID: 0x3},
+ 45: {cat: 0xd, setID: 0xe},
+ 46: {cat: 0xd, setID: 0x21},
+ 47: {cat: 0x22, setID: 0x1},
+ 48: {cat: 0x23, setID: 0x19},
+ 49: {cat: 0x24, setID: 0x1a},
+ 50: {cat: 0x25, setID: 0x22},
+ 51: {cat: 0x22, setID: 0x23},
+ 52: {cat: 0x23, setID: 0x19},
+ 53: {cat: 0x24, setID: 0x1a},
+ 54: {cat: 0x25, setID: 0x22},
+ 55: {cat: 0x22, setID: 0x24},
+ 56: {cat: 0x23, setID: 0x19},
+ 57: {cat: 0x24, setID: 0x1a},
+ 58: {cat: 0x25, setID: 0x22},
+ 59: {cat: 0x21, setID: 0x25},
+ 60: {cat: 0x22, setID: 0x1},
+ 61: {cat: 0x23, setID: 0x2},
+ 62: {cat: 0x24, setID: 0x26},
+ 63: {cat: 0x25, setID: 0x27},
+} // Size: 152 bytes
+
+var ordinalIndex = []uint8{ // 22 elements
+ 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09,
+ 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22,
+ 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40,
+} // Size: 46 bytes
+
+var ordinalLangToIndex = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 40 - 7F
+ 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 80 - BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ // Entry C0 - FF
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 100 - 13F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02,
+ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+ 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 180 - 1BF
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09,
+ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 1C0 - 1FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 200 - 23F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 240 - 27F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 280 - 2BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00,
+ // Entry 2C0 - 2FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 300 - 33F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c,
+} // Size: 799 bytes
+
+var ordinalInclusionMasks = []uint64{ // 100 elements
+ // Entry 0 - 1F
+ 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581,
+ 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001,
+ 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031,
+ 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001,
+ 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001,
+ 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011,
+ // Entry 20 - 3F
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ // Entry 40 - 5F
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ // Entry 60 - 7F
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+} // Size: 824 bytes
+
+// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets
+
+var cardinalRules = []pluralCheck{ // 166 elements
+ 0: {cat: 0x2, setID: 0x3},
+ 1: {cat: 0x22, setID: 0x1},
+ 2: {cat: 0x2, setID: 0x4},
+ 3: {cat: 0x2, setID: 0x4},
+ 4: {cat: 0x7, setID: 0x1},
+ 5: {cat: 0x62, setID: 0x3},
+ 6: {cat: 0x22, setID: 0x4},
+ 7: {cat: 0x7, setID: 0x3},
+ 8: {cat: 0x42, setID: 0x1},
+ 9: {cat: 0x22, setID: 0x4},
+ 10: {cat: 0x22, setID: 0x4},
+ 11: {cat: 0x22, setID: 0x5},
+ 12: {cat: 0x22, setID: 0x1},
+ 13: {cat: 0x22, setID: 0x1},
+ 14: {cat: 0x7, setID: 0x4},
+ 15: {cat: 0x92, setID: 0x3},
+ 16: {cat: 0xf, setID: 0x6},
+ 17: {cat: 0x1f, setID: 0x7},
+ 18: {cat: 0x82, setID: 0x3},
+ 19: {cat: 0x92, setID: 0x3},
+ 20: {cat: 0xf, setID: 0x6},
+ 21: {cat: 0x62, setID: 0x3},
+ 22: {cat: 0x4a, setID: 0x6},
+ 23: {cat: 0x7, setID: 0x8},
+ 24: {cat: 0x62, setID: 0x3},
+ 25: {cat: 0x1f, setID: 0x9},
+ 26: {cat: 0x62, setID: 0x3},
+ 27: {cat: 0x5f, setID: 0x9},
+ 28: {cat: 0x72, setID: 0x3},
+ 29: {cat: 0x29, setID: 0xa},
+ 30: {cat: 0x29, setID: 0xb},
+ 31: {cat: 0x4f, setID: 0xb},
+ 32: {cat: 0x61, setID: 0x2},
+ 33: {cat: 0x2f, setID: 0x6},
+ 34: {cat: 0x3a, setID: 0x7},
+ 35: {cat: 0x4f, setID: 0x6},
+ 36: {cat: 0x5f, setID: 0x7},
+ 37: {cat: 0x62, setID: 0x2},
+ 38: {cat: 0x4f, setID: 0x6},
+ 39: {cat: 0x72, setID: 0x2},
+ 40: {cat: 0x21, setID: 0x3},
+ 41: {cat: 0x7, setID: 0x4},
+ 42: {cat: 0x32, setID: 0x3},
+ 43: {cat: 0x21, setID: 0x3},
+ 44: {cat: 0x22, setID: 0x1},
+ 45: {cat: 0x22, setID: 0x1},
+ 46: {cat: 0x23, setID: 0x2},
+ 47: {cat: 0x2, setID: 0x3},
+ 48: {cat: 0x22, setID: 0x1},
+ 49: {cat: 0x24, setID: 0xc},
+ 50: {cat: 0x7, setID: 0x1},
+ 51: {cat: 0x62, setID: 0x3},
+ 52: {cat: 0x74, setID: 0x3},
+ 53: {cat: 0x24, setID: 0x3},
+ 54: {cat: 0x2f, setID: 0xd},
+ 55: {cat: 0x34, setID: 0x1},
+ 56: {cat: 0xf, setID: 0x6},
+ 57: {cat: 0x1f, setID: 0x7},
+ 58: {cat: 0x62, setID: 0x3},
+ 59: {cat: 0x4f, setID: 0x6},
+ 60: {cat: 0x5a, setID: 0x7},
+ 61: {cat: 0xf, setID: 0xe},
+ 62: {cat: 0x1f, setID: 0xf},
+ 63: {cat: 0x64, setID: 0x3},
+ 64: {cat: 0x4f, setID: 0xe},
+ 65: {cat: 0x5c, setID: 0xf},
+ 66: {cat: 0x22, setID: 0x10},
+ 67: {cat: 0x23, setID: 0x11},
+ 68: {cat: 0x24, setID: 0x12},
+ 69: {cat: 0xf, setID: 0x1},
+ 70: {cat: 0x62, setID: 0x3},
+ 71: {cat: 0xf, setID: 0x2},
+ 72: {cat: 0x63, setID: 0x3},
+ 73: {cat: 0xf, setID: 0x13},
+ 74: {cat: 0x64, setID: 0x3},
+ 75: {cat: 0x74, setID: 0x3},
+ 76: {cat: 0xf, setID: 0x1},
+ 77: {cat: 0x62, setID: 0x3},
+ 78: {cat: 0x4a, setID: 0x1},
+ 79: {cat: 0xf, setID: 0x2},
+ 80: {cat: 0x63, setID: 0x3},
+ 81: {cat: 0x4b, setID: 0x2},
+ 82: {cat: 0xf, setID: 0x13},
+ 83: {cat: 0x64, setID: 0x3},
+ 84: {cat: 0x4c, setID: 0x13},
+ 85: {cat: 0x7, setID: 0x1},
+ 86: {cat: 0x62, setID: 0x3},
+ 87: {cat: 0x7, setID: 0x2},
+ 88: {cat: 0x63, setID: 0x3},
+ 89: {cat: 0x2f, setID: 0xa},
+ 90: {cat: 0x37, setID: 0x14},
+ 91: {cat: 0x65, setID: 0x3},
+ 92: {cat: 0x7, setID: 0x1},
+ 93: {cat: 0x62, setID: 0x3},
+ 94: {cat: 0x7, setID: 0x15},
+ 95: {cat: 0x64, setID: 0x3},
+ 96: {cat: 0x75, setID: 0x3},
+ 97: {cat: 0x7, setID: 0x1},
+ 98: {cat: 0x62, setID: 0x3},
+ 99: {cat: 0xf, setID: 0xe},
+ 100: {cat: 0x1f, setID: 0xf},
+ 101: {cat: 0x64, setID: 0x3},
+ 102: {cat: 0xf, setID: 0x16},
+ 103: {cat: 0x17, setID: 0x1},
+ 104: {cat: 0x65, setID: 0x3},
+ 105: {cat: 0xf, setID: 0x17},
+ 106: {cat: 0x65, setID: 0x3},
+ 107: {cat: 0xf, setID: 0xf},
+ 108: {cat: 0x65, setID: 0x3},
+ 109: {cat: 0x2f, setID: 0x6},
+ 110: {cat: 0x3a, setID: 0x7},
+ 111: {cat: 0x2f, setID: 0xe},
+ 112: {cat: 0x3c, setID: 0xf},
+ 113: {cat: 0x2d, setID: 0xa},
+ 114: {cat: 0x2d, setID: 0x17},
+ 115: {cat: 0x2d, setID: 0x18},
+ 116: {cat: 0x2f, setID: 0x6},
+ 117: {cat: 0x3a, setID: 0xb},
+ 118: {cat: 0x2f, setID: 0x19},
+ 119: {cat: 0x3c, setID: 0xb},
+ 120: {cat: 0x55, setID: 0x3},
+ 121: {cat: 0x22, setID: 0x1},
+ 122: {cat: 0x24, setID: 0x3},
+ 123: {cat: 0x2c, setID: 0xc},
+ 124: {cat: 0x2d, setID: 0xb},
+ 125: {cat: 0xf, setID: 0x6},
+ 126: {cat: 0x1f, setID: 0x7},
+ 127: {cat: 0x62, setID: 0x3},
+ 128: {cat: 0xf, setID: 0xe},
+ 129: {cat: 0x1f, setID: 0xf},
+ 130: {cat: 0x64, setID: 0x3},
+ 131: {cat: 0xf, setID: 0xa},
+ 132: {cat: 0x65, setID: 0x3},
+ 133: {cat: 0xf, setID: 0x17},
+ 134: {cat: 0x65, setID: 0x3},
+ 135: {cat: 0xf, setID: 0x18},
+ 136: {cat: 0x65, setID: 0x3},
+ 137: {cat: 0x2f, setID: 0x6},
+ 138: {cat: 0x3a, setID: 0x1a},
+ 139: {cat: 0x2f, setID: 0x1b},
+ 140: {cat: 0x3b, setID: 0x1c},
+ 141: {cat: 0x2f, setID: 0x1d},
+ 142: {cat: 0x3c, setID: 0x1e},
+ 143: {cat: 0x37, setID: 0x3},
+ 144: {cat: 0xa5, setID: 0x0},
+ 145: {cat: 0x22, setID: 0x1},
+ 146: {cat: 0x23, setID: 0x2},
+ 147: {cat: 0x24, setID: 0x1f},
+ 148: {cat: 0x25, setID: 0x20},
+ 149: {cat: 0xf, setID: 0x6},
+ 150: {cat: 0x62, setID: 0x3},
+ 151: {cat: 0xf, setID: 0x1b},
+ 152: {cat: 0x63, setID: 0x3},
+ 153: {cat: 0xf, setID: 0x21},
+ 154: {cat: 0x64, setID: 0x3},
+ 155: {cat: 0x75, setID: 0x3},
+ 156: {cat: 0x21, setID: 0x3},
+ 157: {cat: 0x22, setID: 0x1},
+ 158: {cat: 0x23, setID: 0x2},
+ 159: {cat: 0x2c, setID: 0x22},
+ 160: {cat: 0x2d, setID: 0x5},
+ 161: {cat: 0x21, setID: 0x3},
+ 162: {cat: 0x22, setID: 0x1},
+ 163: {cat: 0x23, setID: 0x2},
+ 164: {cat: 0x24, setID: 0x23},
+ 165: {cat: 0x25, setID: 0x24},
+} // Size: 356 bytes
+
+var cardinalIndex = []uint8{ // 36 elements
+ 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a,
+ 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b,
+ 0x2d, 0x2f, 0x32, 0x38, 0x42, 0x45, 0x4c, 0x55,
+ 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91,
+ 0x95, 0x9c, 0xa1, 0xa6,
+} // Size: 60 bytes
+
+var cardinalLangToIndex = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06,
+ 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00,
+ // Entry 40 - 7F
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e,
+ 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09,
+ 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 80 - BF
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry C0 - FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ // Entry 100 - 13F
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04,
+ 0x08, 0x08, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c,
+ 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f,
+ 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08,
+ 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01,
+ 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 180 - 1BF
+ 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02,
+ 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10,
+ // Entry 1C0 - 1FF
+ 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08,
+ 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06,
+ 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08,
+ 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 200 - 23F
+ 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08,
+ 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d,
+ 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04,
+ // Entry 240 - 27F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12,
+ 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d,
+ 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00,
+ 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10,
+ // Entry 280 - 2BF
+ 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06,
+ 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08,
+ // Entry 2C0 - 2FF
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07,
+ 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 300 - 33F
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04,
+} // Size: 799 bytes
+
+var cardinalInclusionMasks = []uint64{ // 100 elements
+ // Entry 0 - 1F
+ 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101,
+ 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001,
+ 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1,
+ 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821,
+ 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061,
+ // Entry 20 - 3F
+ 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021,
+ 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061,
+ 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021,
+ 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ // Entry 40 - 5F
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061,
+ 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021,
+ 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061,
+ 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021,
+ // Entry 60 - 7F
+ 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221,
+} // Size: 824 bytes
+
+// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets
+
+// Total table size 3860 bytes (3KiB); checksum: AAFBF21
diff --git a/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/vendor/golang.org/x/text/internal/catmsg/catmsg.go
new file mode 100644
index 000000000..1b257a7b4
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/catmsg.go
@@ -0,0 +1,417 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package catmsg contains support types for package x/text/message/catalog.
+//
+// This package contains the low-level implementations of Message used by the
+// catalog package and provides primitives for other packages to implement their
+// own. For instance, the plural package provides functionality for selecting
+// translation strings based on the plural category of substitution arguments.
+//
+// # Encoding and Decoding
+//
+// Catalogs store Messages encoded as a single string. Compiling a message into
+// a string both results in a more compact representation and speeds up
+// evaluation.
+//
+// A Message must implement a Compile method to convert its arbitrary
+// representation to a string. The Compile method takes an Encoder which
+// facilitates serializing the message. Encoders also provide more context of
+// the message's creation (such as for which language the message is intended),
+// which may not be known at the time of the creation of the message.
+//
+// Each message type must also have an accompanying decoder registered to decode
+// the message. Such a decoder is passed a Decoder, which provides the decoding
+// counterparts of the Encoder's methods.
+//
+// # Renderers
+//
+// A Decoder must be initialized with a Renderer implementation. These
+// implementations must be provided by packages that use Catalogs, typically
+// formatting packages such as x/text/message. A typical user will not need to
+// worry about this type; it is only relevant to packages that do string
+// formatting and want to use the catalog package to handle localized strings.
+//
+// A package that uses catalogs for selecting strings receives selection results
+// as a sequence of substrings passed to the Renderer. The following snippet shows
+// how to express the above example using the message package.
+//
+// message.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes", plural.Select(1, "one", "minute")),
+// catalog.String("You are %[1]d ${minutes} late."))
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late.
+//
+// To evaluate the Printf, package message wraps the arguments in a Renderer
+// that is passed to the catalog for message decoding. The call sequence that
+// results from evaluating the above message, assuming the person is rather
+// tardy, is:
+//
+// Render("You are %[1]d ")
+// Arg(1)
+// Render("minutes")
+// Render(" late.")
+//
+// The call to Arg is caused by the plural.Select execution, which evaluates
+// the argument to determine whether the singular or plural message form should
+// be selected. The calls to Render report the partial results to the message
+// package for further evaluation.
+package catmsg
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/text/language"
+)
+
+// A Handle refers to a registered message type.
+type Handle int
+
+// A Handler decodes and evaluates data compiled by a Message and sends the
+// result to the Decoder. The output may depend on the value of the substitution
+// arguments, accessible by the Decoder's Arg method. The Handler returns false
+// if there is no translation for the given substitution arguments.
+type Handler func(d *Decoder) bool
+
+// Register records the existence of a message type and returns a Handle that
+// can be used in the Encoder's EncodeMessageType method to create such
+// messages. The prefix of the name should be the package path followed by
+// an optional disambiguating string.
+// Register will panic if a handle for the same name was already registered.
+func Register(name string, handler Handler) Handle {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ if _, ok := names[name]; ok {
+ panic(fmt.Errorf("catmsg: handler for %q already exists", name))
+ }
+ h := Handle(len(handlers))
+ names[name] = h
+ handlers = append(handlers, handler)
+ return h
+}
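+
+// As a sketch (the package path, message type, and handler below are
+// hypothetical), an external package registers a custom message type like
+// this:
+//
+//	// Static's Compile is assumed to call EncodeMessageType(staticHandle)
+//	// followed by EncodeString; the handler mirrors that encoding.
+//	var staticHandle = catmsg.Register("example.com/msg.Static", func(d *catmsg.Decoder) bool {
+//		d.Render(d.DecodeString())
+//		return true
+//	})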
+
+// These handlers require fixed positions in the handlers slice.
+const (
+ msgVars Handle = iota
+ msgFirst
+ msgRaw
+ msgString
+ msgAffix
+ // Leave some arbitrary room for future expansion: 20 should suffice.
+ numInternal = 20
+)
+
+const prefix = "golang.org/x/text/internal/catmsg."
+
+var (
+ // TODO: find a more stable way to link handles to message types.
+ mutex sync.Mutex
+ names = map[string]Handle{
+ prefix + "Vars": msgVars,
+ prefix + "First": msgFirst,
+ prefix + "Raw": msgRaw,
+ prefix + "String": msgString,
+ prefix + "Affix": msgAffix,
+ }
+ handlers = make([]Handler, numInternal)
+)
+
+func init() {
+ // This handler is a message type wrapper that initializes a decoder
+ // with a variable block. This message type, if present, is always at the
+ // start of an encoded message.
+ handlers[msgVars] = func(d *Decoder) bool {
+ blockSize := int(d.DecodeUint())
+ d.vars = d.data[:blockSize]
+ d.data = d.data[blockSize:]
+ return d.executeMessage()
+ }
+
+ // First takes the first message in a sequence that results in a match for
+ // the given substitution arguments.
+ handlers[msgFirst] = func(d *Decoder) bool {
+ for !d.Done() {
+ if d.ExecuteMessage() {
+ return true
+ }
+ }
+ return false
+ }
+
+ handlers[msgRaw] = func(d *Decoder) bool {
+ d.Render(d.data)
+ return true
+ }
+
+ // A String message alternates between a string constant and a variable
+ // substitution.
+ handlers[msgString] = func(d *Decoder) bool {
+ for !d.Done() {
+ if str := d.DecodeString(); str != "" {
+ d.Render(str)
+ }
+ if d.Done() {
+ break
+ }
+ d.ExecuteSubstitution()
+ }
+ return true
+ }
+
+ handlers[msgAffix] = func(d *Decoder) bool {
+ // TODO: use an alternative method for common cases.
+ prefix := d.DecodeString()
+ suffix := d.DecodeString()
+ if prefix != "" {
+ d.Render(prefix)
+ }
+ ret := d.ExecuteMessage()
+ if suffix != "" {
+ d.Render(suffix)
+ }
+ return ret
+ }
+}
+
+var (
+ // ErrIncomplete indicates a compiled message does not define translations
+ // for all possible argument values. If this message is returned, evaluating
+ // a message may result in the ErrNoMatch error.
+ ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs")
+
+ // ErrNoMatch indicates no translation message matched the given input
+ // parameters when evaluating a message.
+ ErrNoMatch = errors.New("catmsg: no translation for inputs")
+)
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message interface {
+ // Compile encodes the format string(s) of the message as a string for later
+ // evaluation.
+ //
+ // The first call Compile makes on the encoder must be EncodeMessageType.
+ // The handle passed to this call may either be a handle returned by
+ // Register to encode a single custom message, or HandleFirst followed by
+ // a sequence of calls to EncodeMessage.
+ //
+ // Compile must return ErrIncomplete if it is possible for evaluation to
+ // not match any translation for a given set of formatting parameters.
+ // For example, selecting a translation based on plural form may not yield
+ // a match if the form "Other" is not one of the selectors.
+ //
+ // Compile may return any other application-specific error. For backwards
+// compatibility with packages like fmt, which often do not do sanity
+ // checking of format strings ahead of time, Compile should still make an
+ // effort to have some sensible fallback in case of an error.
+ Compile(e *Encoder) error
+}
+
+// Compile converts a Message to a data string that can be stored in a Catalog.
+// The resulting string can subsequently be decoded by passing to the Execute
+// method of a Decoder.
+func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) {
+ // TODO: pass macros so they can be used for validation.
+ v := &Encoder{inBody: true} // encoder for variables
+ v.root = v
+ e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages
+ err = m.Compile(e)
+ // This package serves the message package, which in turn is meant to be a
+ // drop-in replacement for fmt. With the fmt package, format strings are
+ // evaluated lazily and errors are handled by substituting strings in the
+ // result, rather than returning an error. Dealing with multiple languages
+ // makes it more important to check errors ahead of time. We chose to be
+ // consistent and compatible and allow graceful degradation in case of
+ // errors.
+ buf := e.buf[stripPrefix(e.buf):]
+ if len(v.buf) > 0 {
+ // Prepend variable block.
+ b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf))
+ b[0] = byte(msgVars)
+ b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))]
+ b = append(b, v.buf...)
+ b = append(b, buf...)
+ buf = b
+ }
+ if err == nil {
+ err = v.err
+ }
+ return string(buf), err
+}
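+
+// A minimal sketch of compiling a message with a variable; the Dictionary
+// argument may be nil when the message uses no macros, and all names here are
+// illustrative:
+//
+//	msg := catmsg.FirstOf{
+//		&catmsg.Var{Name: "minutes", Message: catmsg.String("minute")},
+//		catmsg.String("You are %[1]d ${minutes} late."),
+//	}
+//	data, err := catmsg.Compile(language.English, nil, msg)
+//	// data can now be stored in a catalog and evaluated via Decoder.Execute.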
+
+// FirstOf is a message type that prints the first message in the sequence that
+// resolves to a match for the given substitution arguments.
+type FirstOf []Message
+
+// Compile implements Message.
+func (s FirstOf) Compile(e *Encoder) error {
+ e.EncodeMessageType(msgFirst)
+ err := ErrIncomplete
+ for i, m := range s {
+ if err == nil {
+ return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1)
+ }
+ err = e.EncodeMessage(m)
+ }
+ return err
+}
+
+// Var defines a message that can be substituted for a placeholder of the same
+// name. If an expression does not result in a string after evaluation, Name is
+// used as the substitution. For example:
+//
+// Var{
+// Name: "minutes",
+// Message: plural.Select(1, "one", "minute"),
+// }
+//
+// will resolve to "minute" for the singular form and to the variable name,
+// "minutes", for all other plural forms.
+type Var struct {
+ Name string
+ Message Message
+}
+
+var errIsVar = errors.New("catmsg: variable used as message")
+
+// Compile implements Message.
+//
+// Note that this method merely registers a variable; it does not create an
+// encoded message.
+func (v *Var) Compile(e *Encoder) error {
+ if err := e.addVar(v.Name, v.Message); err != nil {
+ return err
+ }
+ // Using a Var by itself is an error. If it is in a sequence followed by
+ // other messages referring to it, this error will be ignored.
+ return errIsVar
+}
+
+// Raw is a message consisting of a single format string that is passed as is
+// to the Renderer.
+//
+// Note that a Renderer may still do its own variable substitution.
+type Raw string
+
+// Compile implements Message.
+func (r Raw) Compile(e *Encoder) (err error) {
+ e.EncodeMessageType(msgRaw)
+ // Special case: raw strings don't have a size encoding and so don't use
+ // EncodeString.
+ e.buf = append(e.buf, r...)
+ return nil
+}
+
+// String is a message consisting of a single format string which contains
+// placeholders that may be substituted with variables.
+//
+// Variable substitutions are marked with placeholders and a variable name of
+// the form ${name}. Any other substitutions such as Go templates or
+// printf-style substitutions are left to be done by the Renderer.
+//
+// When evaluating a string interpolation, a Renderer will receive separate
+// calls for each placeholder and interstitial string. For example, for the
+// message "%[1]v ${invites} %[2]v to ${their} party.", the sequence of calls
+// is:
+//
+// d.Render("%[1]v ")
+// d.Arg(1)
+// d.Render(resultOfInvites)
+// d.Render(" %[2]v to ")
+// d.Arg(2)
+// d.Render(resultOfTheir)
+// d.Render(" party.")
+//
+// where the messages for "invites" and "their" both use a plural.Select
+// referring to the first argument.
+//
+// Strings may also invoke macros. Macros are essentially variables that can be
+// reused. Macros may, for instance, be used to make selections between
+// different conjugations of a verb. See the catalog package description for an
+// overview of macros.
+type String string
+
+// Compile implements Message. It parses the placeholder formats and returns
+// any error.
+func (s String) Compile(e *Encoder) (err error) {
+ msg := string(s)
+ const subStart = "${"
+ hasHeader := false
+ p := 0
+ b := []byte{}
+ for {
+ i := strings.Index(msg[p:], subStart)
+ if i == -1 {
+ break
+ }
+ b = append(b, msg[p:p+i]...)
+ p += i + len(subStart)
+ if i = strings.IndexByte(msg[p:], '}'); i == -1 {
+ b = append(b, "$!(MISSINGBRACE)"...)
+ err = fmt.Errorf("catmsg: missing '}'")
+ p = len(msg)
+ break
+ }
+ name := strings.TrimSpace(msg[p : p+i])
+ if q := strings.IndexByte(name, '('); q == -1 {
+ if !hasHeader {
+ hasHeader = true
+ e.EncodeMessageType(msgString)
+ }
+ e.EncodeString(string(b))
+ e.EncodeSubstitution(name)
+ b = b[:0]
+ } else if j := strings.IndexByte(name[q:], ')'); j == -1 {
+ // TODO: what should the error be?
+ b = append(b, "$!(MISSINGPAREN)"...)
+ err = fmt.Errorf("catmsg: missing ')'")
+ } else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil {
+ // TODO: handle more than one argument
+ b = append(b, "$!(BADNUM)"...)
+ err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j]))
+ } else {
+ if !hasHeader {
+ hasHeader = true
+ e.EncodeMessageType(msgString)
+ }
+ e.EncodeString(string(b))
+ e.EncodeSubstitution(name[:q], int(x))
+ b = b[:0]
+ }
+ p += i + 1
+ }
+ b = append(b, msg[p:]...)
+ if !hasHeader {
+ // Simplify string to a raw string.
+ Raw(string(b)).Compile(e)
+ } else if len(b) > 0 {
+ e.EncodeString(string(b))
+ }
+ return err
+}
+
+// Affix is a message that adds a prefix and suffix to another message.
+// This is mostly used to add back whitespace to a translation that was
+// stripped before sending it out.
+type Affix struct {
+ Message Message
+ Prefix string
+ Suffix string
+}
+
+// Compile implements Message.
+func (a Affix) Compile(e *Encoder) (err error) {
+ // TODO: consider adding a special message type that just adds a single
+ // return. This is probably common enough to handle the majority of cases.
+ // Get some stats first, though.
+ e.EncodeMessageType(msgAffix)
+ e.EncodeString(a.Prefix)
+ e.EncodeString(a.Suffix)
+ e.EncodeMessage(a.Message)
+ return nil
+}
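+
+// A short sketch of restoring stripped whitespace around a translation (the
+// newlines here are illustrative):
+//
+//	msg := catmsg.Affix{
+//		Message: catmsg.String("Hello, world!"),
+//		Prefix:  "\n",
+//		Suffix:  "\n",
+//	}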
diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go
new file mode 100644
index 000000000..547802b0f
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/codec.go
@@ -0,0 +1,407 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catmsg
+
+import (
+ "errors"
+ "fmt"
+
+ "golang.org/x/text/language"
+)
+
+// A Renderer renders a Message.
+type Renderer interface {
+ // Render renders the given string. The given string may be interpreted as a
+ // format string, such as the one used by the fmt package or a template.
+ Render(s string)
+
+ // Arg returns the i-th argument passed to format a message. This method
+ // should return nil if there is no such argument. Messages need access to
+ // arguments to allow selecting a message based on linguistic features of
+ // those arguments.
+ Arg(i int) interface{}
+}
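+
+// A minimal Renderer sketch that collects output in a buffer (hypothetical
+// and for illustration only; real implementations live in formatting packages
+// such as x/text/message):
+//
+//	type bufRenderer struct {
+//		buf  []byte
+//		args []interface{}
+//	}
+//
+//	func (r *bufRenderer) Render(s string) { r.buf = append(r.buf, s...) }
+//
+//	func (r *bufRenderer) Arg(i int) interface{} {
+//		if i < 1 || i > len(r.args) { // argument indices are 1-based, as in fmt
+//			return nil
+//		}
+//		return r.args[i-1]
+//	}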
+
+// A Dictionary specifies a source of messages, including variables or macros.
+type Dictionary interface {
+ // Lookup returns the message for the given key. It returns false for ok if
+ // such a message could not be found.
+ Lookup(key string) (data string, ok bool)
+
+ // TODO: consider returning an interface, instead of a string. This will
+ // allow implementations to do their own message type decoding.
+}
+
+// An Encoder serializes a Message to a string.
+type Encoder struct {
+ // The root encoder is used for storing encoded variables.
+ root *Encoder
+ // The parent encoder provides the surrounding scopes for resolving variable
+ // names.
+ parent *Encoder
+
+ tag language.Tag
+
+ // buf holds the encoded message so far. After a message completes encoding,
+ // the contents of buf, prefixed by the encoded length, are flushed to the
+ // parent buffer.
+ buf []byte
+
+ // vars is the lookup table of variables in the current scope.
+ vars []keyVal
+
+ err error
+ inBody bool // if false next call must be EncodeMessageType
+}
+
+type keyVal struct {
+ key string
+ offset int
+}
+
+// Language reports the language for which the encoded message will be stored
+// in the Catalog.
+func (e *Encoder) Language() language.Tag { return e.tag }
+
+func (e *Encoder) setError(err error) {
+ if e.root.err == nil {
+ e.root.err = err
+ }
+}
+
+// EncodeUint encodes x.
+func (e *Encoder) EncodeUint(x uint64) {
+ e.checkInBody()
+ var buf [maxVarintBytes]byte
+ n := encodeUint(buf[:], x)
+ e.buf = append(e.buf, buf[:n]...)
+}
+
+// EncodeString encodes s.
+func (e *Encoder) EncodeString(s string) {
+ e.checkInBody()
+ e.EncodeUint(uint64(len(s)))
+ e.buf = append(e.buf, s...)
+}
+
+// EncodeMessageType marks the current message to be of type h.
+//
+// It must be the first call of a Message's Compile method.
+func (e *Encoder) EncodeMessageType(h Handle) {
+ if e.inBody {
+ panic("catmsg: EncodeMessageType not the first method called")
+ }
+ e.inBody = true
+ e.EncodeUint(uint64(h))
+}
+
+// EncodeMessage serializes the given message inline at the current position.
+func (e *Encoder) EncodeMessage(m Message) error {
+ e = &Encoder{root: e.root, parent: e, tag: e.tag}
+ err := m.Compile(e)
+ if _, ok := m.(*Var); !ok {
+ e.flushTo(e.parent)
+ }
+ return err
+}
+
+func (e *Encoder) checkInBody() {
+ if !e.inBody {
+ panic("catmsg: expected prior call to EncodeMessageType")
+ }
+}
+
+// stripPrefix indicates the number of prefix bytes that must be stripped to
+// turn a single-element sequence into a message that is just this single member
+// without its size prefix. If the message can be stripped, b[1:n] contains the
+// size prefix.
+func stripPrefix(b []byte) (n int) {
+ if len(b) > 0 && Handle(b[0]) == msgFirst {
+ x, n, _ := decodeUint(b[1:])
+ if 1+n+int(x) == len(b) {
+ return 1 + n
+ }
+ }
+ return 0
+}
+
+func (e *Encoder) flushTo(dst *Encoder) {
+ data := e.buf
+ p := stripPrefix(data)
+ if p > 0 {
+ data = data[1:]
+ } else {
+ // Prefix the size.
+ dst.EncodeUint(uint64(len(data)))
+ }
+ dst.buf = append(dst.buf, data...)
+}
+
+func (e *Encoder) addVar(key string, m Message) error {
+ for _, v := range e.parent.vars {
+ if v.key == key {
+ err := fmt.Errorf("catmsg: duplicate variable %q", key)
+ e.setError(err)
+ return err
+ }
+ }
+ scope := e.parent
+ // If a variable message is Incomplete, and does not evaluate to a message
+ // during execution, we fall back to the variable name. We encode this by
+ // appending the variable name if the message reports it's incomplete.
+
+ err := m.Compile(e)
+ if err != ErrIncomplete {
+ e.setError(err)
+ }
+ switch {
+ case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence
+ e.buf = e.buf[:0]
+ e.inBody = false
+ fallthrough
+ case len(e.buf) == 0:
+ // Empty message.
+ if err := String(key).Compile(e); err != nil {
+ e.setError(err)
+ }
+ case err == ErrIncomplete:
+ if Handle(e.buf[0]) != msgFirst {
+ seq := &Encoder{root: e.root, parent: e}
+ seq.EncodeMessageType(msgFirst)
+ e.flushTo(seq)
+ e = seq
+ }
+ // e contains a sequence; append the fallback string.
+ e.EncodeMessage(String(key))
+ }
+
+ // Flush result to variable heap.
+ offset := len(e.root.buf)
+ e.flushTo(e.root)
+ e.buf = e.buf[:0]
+
+ // Record variable offset in current scope.
+ scope.vars = append(scope.vars, keyVal{key: key, offset: offset})
+ return err
+}
+
+const (
+ substituteVar = iota
+ substituteMacro
+ substituteError
+)
+
+// EncodeSubstitution inserts a resolved reference to a variable or macro.
+//
+// This call must be matched with a call to ExecuteSubstitution at decoding
+// time.
+func (e *Encoder) EncodeSubstitution(name string, arguments ...int) {
+ if arity := len(arguments); arity > 0 {
+ // TODO: also resolve macros.
+ e.EncodeUint(substituteMacro)
+ e.EncodeString(name)
+ for _, a := range arguments {
+ e.EncodeUint(uint64(a))
+ }
+ return
+ }
+ for scope := e; scope != nil; scope = scope.parent {
+ for _, v := range scope.vars {
+ if v.key != name {
+ continue
+ }
+ e.EncodeUint(substituteVar) // TODO: support arity > 0
+ e.EncodeUint(uint64(v.offset))
+ return
+ }
+ }
+ // TODO: refer to dictionary-wide scoped variables.
+ e.EncodeUint(substituteError)
+ e.EncodeString(name)
+ e.setError(fmt.Errorf("catmsg: unknown var %q", name))
+}
+
+// A Decoder deserializes and evaluates messages that are encoded by an encoder.
+type Decoder struct {
+ tag language.Tag
+ dst Renderer
+ macros Dictionary
+
+ err error
+ vars string
+ data string
+
+ macroArg int // TODO: allow more than one argument
+}
+
+// NewDecoder returns a new Decoder.
+//
+// Decoders are designed to be reused for multiple invocations of Execute.
+// Only one goroutine may call Execute at a time.
+func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder {
+ return &Decoder{
+ tag: tag,
+ dst: r,
+ macros: macros,
+ }
+}
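+
+// A sketch of evaluating a message compiled by Compile, where r is a Renderer
+// such as the bufRenderer sketched above and the macros Dictionary is nil
+// because the message uses none:
+//
+//	d := catmsg.NewDecoder(language.English, r, nil)
+//	if err := d.Execute(data); err != nil {
+//		// err may be ErrNoMatch if no translation matched the arguments.
+//	}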
+
+func (d *Decoder) setError(err error) {
+ if d.err == nil {
+ d.err = err
+ }
+}
+
+// Language returns the language in which the message is being rendered.
+//
+// The destination language may be a child language of the language used for
+// encoding. For instance, a decoding language of "pt-PT" is consistent with an
+// encoding language of "pt".
+func (d *Decoder) Language() language.Tag { return d.tag }
+
+// Done reports whether all bytes in this message have been processed.
+func (d *Decoder) Done() bool { return len(d.data) == 0 }
+
+// Render implements Renderer.
+func (d *Decoder) Render(s string) { d.dst.Render(s) }
+
+// Arg implements Renderer.
+//
+// During evaluation of macros, the argument positions may be mapped to
+// arguments that differ from the original call.
+func (d *Decoder) Arg(i int) interface{} {
+ if d.macroArg != 0 {
+ if i != 1 {
+ panic("catmsg: only macros with single argument supported")
+ }
+ i = d.macroArg
+ }
+ return d.dst.Arg(i)
+}
+
+// DecodeUint decodes a number that was encoded with EncodeUint and advances the
+// position.
+func (d *Decoder) DecodeUint() uint64 {
+ x, n, err := decodeUintString(d.data)
+ d.data = d.data[n:]
+ if err != nil {
+ d.setError(err)
+ }
+ return x
+}
+
+// DecodeString decodes a string that was encoded with EncodeString and advances
+// the position.
+func (d *Decoder) DecodeString() string {
+ size := d.DecodeUint()
+ s := d.data[:size]
+ d.data = d.data[size:]
+ return s
+}
+
+// SkipMessage skips the message at the current location and advances the
+// position.
+func (d *Decoder) SkipMessage() {
+ n := int(d.DecodeUint())
+ d.data = d.data[n:]
+}
+
+// Execute decodes and evaluates msg.
+//
+// Only one goroutine may call Execute at a time.
+func (d *Decoder) Execute(msg string) error {
+ d.err = nil
+ if !d.execute(msg) {
+ return ErrNoMatch
+ }
+ return d.err
+}
+
+func (d *Decoder) execute(msg string) bool {
+ saved := d.data
+ d.data = msg
+ ok := d.executeMessage()
+ d.data = saved
+ return ok
+}
+
+// executeMessageFromData is like execute, but also decodes a leading message
+// size and clips the given string accordingly.
+//
+// It reports the number of bytes consumed and whether a message was selected.
+func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) {
+ saved := d.data
+ d.data = s
+ size := int(d.DecodeUint())
+ n = len(s) - len(d.data)
+ // Sanitize the setting. This allows skipping a size argument for
+ // RawString and method Done.
+ d.data = d.data[:size]
+ ok = d.executeMessage()
+ n += size - len(d.data)
+ d.data = saved
+ return n, ok
+}
+
+var errUnknownHandler = errors.New("catmsg: string contains unsupported handler")
+
+// executeMessage reads the handle id, initializes the decoder and executes the
+// message. It is assumed that all of d.data holds the single message.
+func (d *Decoder) executeMessage() bool {
+ if d.Done() {
+ // We interpret no data as a valid empty message.
+ return true
+ }
+ handle := d.DecodeUint()
+
+ var fn Handler
+ mutex.Lock()
+ if int(handle) < len(handlers) {
+ fn = handlers[handle]
+ }
+ mutex.Unlock()
+ if fn == nil {
+ d.setError(errUnknownHandler)
+ d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle))
+ return true
+ }
+ return fn(d)
+}
+
+// ExecuteMessage decodes and executes the message at the current position.
+func (d *Decoder) ExecuteMessage() bool {
+ n, ok := d.executeMessageFromData(d.data)
+ d.data = d.data[n:]
+ return ok
+}
+
+// ExecuteSubstitution executes the message corresponding to the substitution
+// as encoded by EncodeSubstitution.
+func (d *Decoder) ExecuteSubstitution() {
+ switch x := d.DecodeUint(); x {
+ case substituteVar:
+ offset := d.DecodeUint()
+ d.executeMessageFromData(d.vars[offset:])
+ case substituteMacro:
+ name := d.DecodeString()
+ data, ok := d.macros.Lookup(name)
+ old := d.macroArg
+ // TODO: support macros of arity other than 1.
+ d.macroArg = int(d.DecodeUint())
+ switch {
+ case !ok:
+ // TODO: detect this at creation time.
+ d.setError(fmt.Errorf("catmsg: undefined macro %q", name))
+ fallthrough
+ case !d.execute(data):
+ d.dst.Render(name) // fall back to macro name.
+ }
+ d.macroArg = old
+ case substituteError:
+ d.dst.Render(d.DecodeString())
+ default:
+ panic("catmsg: unreachable")
+ }
+}
diff --git a/vendor/golang.org/x/text/internal/catmsg/varint.go b/vendor/golang.org/x/text/internal/catmsg/varint.go
new file mode 100644
index 000000000..a2cee2cf5
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/varint.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catmsg
+
+// This file implements varint encoding analogous to the one in encoding/binary.
+// We need a string version of this function, so we add that here and then add
+// the rest for consistency.
+
+import "errors"
+
+var (
+ errIllegalVarint = errors.New("catmsg: illegal varint")
+ errVarintTooLarge = errors.New("catmsg: varint too large for uint64")
+)
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// encodeUint encodes x as a variable-sized integer into buf and returns the
+// number of bytes written. buf must be at least maxVarintBytes long.
+func encodeUint(buf []byte, x uint64) (n int) {
+ for ; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return n
+}
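+
+// For example, 300 (0b1_0010_1100) encodes to the two bytes 0xAC, 0x02: the
+// low seven bits with the continuation bit set, followed by the remaining
+// bits:
+//
+//	var buf [maxVarintBytes]byte
+//	n := encodeUint(buf[:], 300)        // n == 2, buf[:2] == {0xAC, 0x02}
+//	x, size, err := decodeUint(buf[:n]) // x == 300, size == 2, err == nil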
+
+func decodeUintString(s string) (x uint64, size int, err error) {
+ i := 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= len(s) {
+ return 0, i, errIllegalVarint
+ }
+ b := uint64(s[i])
+ i++
+ x |= (b & 0x7F) << shift
+ if b&0x80 == 0 {
+ return x, i, nil
+ }
+ }
+ return 0, i, errVarintTooLarge
+}
+
+func decodeUint(b []byte) (x uint64, size int, err error) {
+ i := 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= len(b) {
+ return 0, i, errIllegalVarint
+ }
+ c := uint64(b[i])
+ i++
+ x |= (c & 0x7F) << shift
+ if c&0x80 == 0 {
+ return x, i, nil
+ }
+ }
+ return 0, i, errVarintTooLarge
+}
diff --git a/vendor/golang.org/x/text/internal/format/format.go b/vendor/golang.org/x/text/internal/format/format.go
new file mode 100644
index 000000000..ee1c57a3c
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/format/format.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format contains types for defining language-specific formatting of
+// values.
+//
+// This package is internal now, but will eventually be exposed after the API
+// settles.
+package format // import "golang.org/x/text/internal/format"
+
+import (
+ "fmt"
+
+ "golang.org/x/text/language"
+)
+
+// State represents the printer state passed to custom formatters. It provides
+// access to the fmt.State interface and the sentence and language-related
+// context.
+type State interface {
+ fmt.State
+
+ // Language reports the requested language in which to render a message.
+ Language() language.Tag
+
+ // TODO: consider this and removing rune from the Format method in the
+ // Formatter interface.
+ //
+ // Verb returns the format variant to render, analogous to the types used
+ // in fmt. Use 'v' for the default or only variant.
+ // Verb() rune
+
+ // TODO: more info:
+ // - sentence context such as linguistic features passed by the translator.
+}
+
+// Formatter is analogous to fmt.Formatter.
+type Formatter interface {
+ Format(state State, verb rune)
+}
diff --git a/vendor/golang.org/x/text/internal/format/parser.go b/vendor/golang.org/x/text/internal/format/parser.go
new file mode 100644
index 000000000..855aed71d
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/format/parser.go
@@ -0,0 +1,358 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+ "reflect"
+ "unicode/utf8"
+)
+
+// A Parser parses a format string. The results of the parse are set in the
+// struct fields.
+type Parser struct {
+ Verb rune
+
+ WidthPresent bool
+ PrecPresent bool
+ Minus bool
+ Plus bool
+ Sharp bool
+ Space bool
+ Zero bool
+
+ // For the formats %+v %#v, we set the plusV/sharpV flags
+ // and clear the plus/sharp flags since %+v and %#v are in effect
+ // different, flagless formats set at the top level.
+ PlusV bool
+ SharpV bool
+
+ HasIndex bool
+
+ Width int
+ Prec int // precision
+
+ // retain arguments across calls.
+ Args []interface{}
+ // retain current argument number across calls
+ ArgNum int
+
+ // reordered records whether the format string used argument reordering.
+ Reordered bool
+ // goodArgNum records whether the most recent reordering directive was valid.
+ goodArgNum bool
+
+ // position info
+ format string
+ startPos int
+ endPos int
+ Status Status
+}
+
+// Reset initializes a parser to scan format strings for the given args.
+func (p *Parser) Reset(args []interface{}) {
+ p.Args = args
+ p.ArgNum = 0
+ p.startPos = 0
+ p.Reordered = false
+}
+
+// Text returns the part of the format string that was parsed by the last call
+// to Scan. It returns the original substitution clause if the current scan
+// parsed a substitution.
+func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] }
+
+// SetFormat sets a new format string to parse. It does not reset the argument
+// count.
+func (p *Parser) SetFormat(format string) {
+ p.format = format
+ p.startPos = 0
+ p.endPos = 0
+}
+
+// Status indicates the result type of a call to Scan.
+type Status int
+
+const (
+ StatusText Status = iota
+ StatusSubstitution
+ StatusBadWidthSubstitution
+ StatusBadPrecSubstitution
+ StatusNoVerb
+ StatusBadArgNum
+ StatusMissingArg
+)
+
+// ClearFlags resets the parser to its default behavior.
+func (p *Parser) ClearFlags() {
+ p.WidthPresent = false
+ p.PrecPresent = false
+ p.Minus = false
+ p.Plus = false
+ p.Sharp = false
+ p.Space = false
+ p.Zero = false
+
+ p.PlusV = false
+ p.SharpV = false
+
+ p.HasIndex = false
+}
+
+// Scan scans the next part of the format string and sets the status to
+// indicate whether it scanned a string literal, substitution or error.
+func (p *Parser) Scan() bool {
+ p.Status = StatusText
+ format := p.format
+ end := len(format)
+ if p.endPos >= end {
+ return false
+ }
+ afterIndex := false // previous item in format was an index like [3].
+
+ p.startPos = p.endPos
+ p.goodArgNum = true
+ i := p.startPos
+ for i < end && format[i] != '%' {
+ i++
+ }
+ if i > p.startPos {
+ p.endPos = i
+ return true
+ }
+ // Process one verb
+ i++
+
+ p.Status = StatusSubstitution
+
+ // Do we have flags?
+ p.ClearFlags()
+
+simpleFormat:
+ for ; i < end; i++ {
+ c := p.format[i]
+ switch c {
+ case '#':
+ p.Sharp = true
+ case '0':
+ p.Zero = !p.Minus // Only allow zero padding to the left.
+ case '+':
+ p.Plus = true
+ case '-':
+ p.Minus = true
+ p.Zero = false // Do not pad with zeros to the right.
+ case ' ':
+ p.Space = true
+ default:
+ // Fast path for the common case of ASCII lower-case simple verbs
+ // without precision, width, or argument indices.
+ if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) {
+ if c == 'v' {
+ // Go syntax
+ p.SharpV = p.Sharp
+ p.Sharp = false
+ // Struct-field syntax
+ p.PlusV = p.Plus
+ p.Plus = false
+ }
+ p.Verb = rune(c)
+ p.ArgNum++
+ p.endPos = i + 1
+ return true
+ }
+ // Format is more complex than simple flags and a verb or is malformed.
+ break simpleFormat
+ }
+ }
+
+ // Do we have an explicit argument index?
+ i, afterIndex = p.updateArgNumber(format, i)
+
+ // Do we have width?
+ if i < end && format[i] == '*' {
+ i++
+ p.Width, p.WidthPresent = p.intFromArg()
+
+ if !p.WidthPresent {
+ p.Status = StatusBadWidthSubstitution
+ }
+
+ // We have a negative width, so take its value and ensure
+ // that the minus flag is set
+ if p.Width < 0 {
+ p.Width = -p.Width
+ p.Minus = true
+ p.Zero = false // Do not pad with zeros to the right.
+ }
+ afterIndex = false
+ } else {
+ p.Width, p.WidthPresent, i = parsenum(format, i, end)
+ if afterIndex && p.WidthPresent { // "%[3]2d"
+ p.goodArgNum = false
+ }
+ }
+
+ // Do we have precision?
+ if i+1 < end && format[i] == '.' {
+ i++
+ if afterIndex { // "%[3].2d"
+ p.goodArgNum = false
+ }
+ i, afterIndex = p.updateArgNumber(format, i)
+ if i < end && format[i] == '*' {
+ i++
+ p.Prec, p.PrecPresent = p.intFromArg()
+ // Negative precision arguments don't make sense
+ if p.Prec < 0 {
+ p.Prec = 0
+ p.PrecPresent = false
+ }
+ if !p.PrecPresent {
+ p.Status = StatusBadPrecSubstitution
+ }
+ afterIndex = false
+ } else {
+ p.Prec, p.PrecPresent, i = parsenum(format, i, end)
+ if !p.PrecPresent {
+ p.Prec = 0
+ p.PrecPresent = true
+ }
+ }
+ }
+
+ if !afterIndex {
+ i, afterIndex = p.updateArgNumber(format, i)
+ }
+ p.HasIndex = afterIndex
+
+ if i >= end {
+ p.endPos = i
+ p.Status = StatusNoVerb
+ return true
+ }
+
+ verb, w := utf8.DecodeRuneInString(format[i:])
+ p.endPos = i + w
+ p.Verb = verb
+
+ switch {
+ case verb == '%': // Percent does not absorb operands and ignores width and precision.
+ p.startPos = p.endPos - 1
+ p.Status = StatusText
+ case !p.goodArgNum:
+ p.Status = StatusBadArgNum
+ case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb.
+ p.Status = StatusMissingArg
+ p.ArgNum++
+ case verb == 'v':
+ // Go syntax
+ p.SharpV = p.Sharp
+ p.Sharp = false
+ // Struct-field syntax
+ p.PlusV = p.Plus
+ p.Plus = false
+ fallthrough
+ default:
+ p.ArgNum++
+ }
+ return true
+}
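+
+// A typical caller alternates between literal text and substitutions in a
+// Scan loop (a sketch; which fields are consulted depends on the caller):
+//
+//	var p Parser
+//	p.Reset([]interface{}{42})
+//	p.SetFormat("%d apples")
+//	for p.Scan() {
+//		switch p.Status {
+//		case StatusText:
+//			// p.Text() == " apples"
+//		case StatusSubstitution:
+//			// p.Verb == 'd'; p.Args[p.ArgNum-1] holds the matched argument.
+//		}
+//	}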
+
+// intFromArg gets the ArgNumth element of Args. On return, isInt reports
+// whether the argument has integer type.
+func (p *Parser) intFromArg() (num int, isInt bool) {
+ if p.ArgNum < len(p.Args) {
+ arg := p.Args[p.ArgNum]
+ num, isInt = arg.(int) // Almost always OK.
+ if !isInt {
+ // Work harder.
+ switch v := reflect.ValueOf(arg); v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n := v.Int()
+ if int64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n := v.Uint()
+ if int64(n) >= 0 && uint64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ default:
+ // Already 0, false.
+ }
+ }
+ p.ArgNum++
+ if tooLarge(num) {
+ num = 0
+ isInt = false
+ }
+ }
+ return
+}
+
+// parseArgNumber returns the value of the bracketed number, minus 1
+// (explicit argument numbers are one-indexed but we want zero-indexed).
+// The opening bracket is known to be present at format[0].
+// The returned values are the index, the number of bytes to consume
+// up to the closing bracket, if present, and whether the number parsed
+// ok. The bytes to consume will be 1 if no closing bracket is present.
+func parseArgNumber(format string) (index int, wid int, ok bool) {
+ // There must be at least 3 bytes: [n].
+ if len(format) < 3 {
+ return 0, 1, false
+ }
+
+ // Find closing bracket.
+ for i := 1; i < len(format); i++ {
+ if format[i] == ']' {
+ width, ok, newi := parsenum(format, 1, i)
+ if !ok || newi != i {
+ return 0, i + 1, false
+ }
+ return width - 1, i + 1, true // arg numbers are one-indexed and skip the bracket.
+ }
+ }
+ return 0, 1, false
+}
+
+// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in
+// argNum or the value of the bracketed integer that begins format[i:]. It also returns
+// the new value of i, that is, the index of the next byte of the format to process.
+func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) {
+ if len(format) <= i || format[i] != '[' {
+ return i, false
+ }
+ p.Reordered = true
+ index, wid, ok := parseArgNumber(format[i:])
+ if ok && 0 <= index && index < len(p.Args) {
+ p.ArgNum = index
+ return i + wid, true
+ }
+ p.goodArgNum = false
+ return i + wid, ok
+}
+
+// tooLarge reports whether the magnitude of the integer is
+// too large to be used as a formatting width or precision.
+func tooLarge(x int) bool {
+ const max int = 1e6
+ return x > max || x < -max
+}
+
+// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present.
+func parsenum(s string, start, end int) (num int, isnum bool, newi int) {
+ if start >= end {
+ return 0, false, end
+ }
+ for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ {
+ if tooLarge(num) {
+ return 0, false, end // Overflow; crazy long number most likely.
+ }
+ num = num*10 + int(s[newi]-'0')
+ isnum = true
+ }
+ return
+}
diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go
new file mode 100644
index 000000000..3cddbbdda
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/internal.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains non-exported functionality that is used by
+// packages in the text repository.
+package internal // import "golang.org/x/text/internal"
+
+import (
+ "sort"
+
+ "golang.org/x/text/language"
+)
+
+// SortTags sorts tags in place.
+func SortTags(tags []language.Tag) {
+ sort.Sort(sorter(tags))
+}
+
+type sorter []language.Tag
+
+func (s sorter) Len() int {
+ return len(s)
+}
+
+func (s sorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s sorter) Less(i, j int) bool {
+ return s[i].String() < s[j].String()
+}
+
+// UniqueTags sorts and filters duplicate tags in place and returns a slice with
+// only unique tags.
+func UniqueTags(tags []language.Tag) []language.Tag {
+ if len(tags) <= 1 {
+ return tags
+ }
+ SortTags(tags)
+ k := 0
+ for i := 1; i < len(tags); i++ {
+ if tags[k].String() < tags[i].String() {
+ k++
+ tags[k] = tags[i]
+ }
+ }
+ return tags[:k+1]
+}
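+
+// For example (a sketch):
+//
+//	tags := []language.Tag{language.English, language.Dutch, language.English}
+//	tags = UniqueTags(tags) // sorted and deduplicated: [en nl]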
diff --git a/vendor/golang.org/x/text/internal/language/common.go b/vendor/golang.org/x/text/internal/language/common.go
new file mode 100644
index 000000000..cdfdb7497
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/common.go
@@ -0,0 +1,16 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package language
+
+// This file contains code common to the maketables.go and the package code.
+
+// AliasType is the type of an alias in AliasMap.
+type AliasType int8
+
+const (
+ Deprecated AliasType = iota
+ Macro
+ Legacy
+
+ AliasTypeUnknown AliasType = -1
+)
diff --git a/vendor/golang.org/x/text/internal/language/compact.go b/vendor/golang.org/x/text/internal/language/compact.go
new file mode 100644
index 000000000..46a001507
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+// CompactCoreInfo is a compact integer with the three core tags encoded.
+type CompactCoreInfo uint32
+
+// GetCompactCore generates a uint32 value that is guaranteed to be unique for
+// different language, region, and script values.
+func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) {
+ if t.LangID > langNoIndexOffset {
+ return 0, false
+ }
+ cci |= CompactCoreInfo(t.LangID) << (8 + 12)
+ cci |= CompactCoreInfo(t.ScriptID) << 12
+ cci |= CompactCoreInfo(t.RegionID)
+ return cci, true
+}
+
+// Tag generates a tag from c.
+func (c CompactCoreInfo) Tag() Tag {
+ return Tag{
+ LangID: Language(c >> 20),
+ RegionID: Region(c & 0x3ff),
+ ScriptID: Script(c>>12) & 0xff,
+ }
+}
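+
+// The packing stores LangID in the high bits, ScriptID in bits 12-19, and
+// RegionID in the low bits, so the core components survive a round trip for
+// tags the encoding supports (a sketch):
+//
+//	if cci, ok := GetCompactCore(t); ok {
+//		core := cci.Tag() // same LangID, ScriptID, and RegionID as t
+//		_ = core
+//	}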
diff --git a/vendor/golang.org/x/text/internal/language/compact/compact.go b/vendor/golang.org/x/text/internal/language/compact/compact.go
new file mode 100644
index 000000000..1b36935ef
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/compact.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package compact defines a compact representation of language tags.
+//
+// Common language tags (at least all for which locale information is defined
+// in CLDR) are assigned a unique index. Each Tag is associated with such an
+// ID for selecting language-related resources (such as translations) as well
+// as one for selecting regional defaults (currency, number formatting, etc.)
+//
+// We may want to export this functionality at some point, but for now it is
+// only available for use within x/text.
+package compact // import "golang.org/x/text/internal/language/compact"
+
+import (
+ "sort"
+ "strings"
+
+ "golang.org/x/text/internal/language"
+)
+
+// ID is an integer identifying a single tag.
+type ID uint16
+
+func getCoreIndex(t language.Tag) (id ID, ok bool) {
+ cci, ok := language.GetCompactCore(t)
+ if !ok {
+ return 0, false
+ }
+ i := sort.Search(len(coreTags), func(i int) bool {
+ return cci <= coreTags[i]
+ })
+ if i == len(coreTags) || coreTags[i] != cci {
+ return 0, false
+ }
+ return ID(i), true
+}
+
+// Parent returns the ID of the parent or the root ID if id is already the root.
+func (id ID) Parent() ID {
+ return parents[id]
+}
+
+// Tag converts id to an internal language Tag.
+func (id ID) Tag() language.Tag {
+ if int(id) >= len(coreTags) {
+ return specialTags[int(id)-len(coreTags)]
+ }
+ return coreTags[id].Tag()
+}
+
+var specialTags []language.Tag
+
+func init() {
+ tags := strings.Split(specialTagsStr, " ")
+ specialTags = make([]language.Tag, len(tags))
+ for i, t := range tags {
+ specialTags[i] = language.MustParse(t)
+ }
+}
diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go
new file mode 100644
index 000000000..8c1b6666f
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/language.go
@@ -0,0 +1,260 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_index.go -output tables.go
+//go:generate go run gen_parents.go
+
+package compact
+
+// TODO: Remove above NOTE after:
+// - verifying that tables are dropped correctly (most notably matcher tables).
+
+import (
+ "strings"
+
+ "golang.org/x/text/internal/language"
+)
+
+// Tag represents a BCP 47 language tag. It is used to specify an instance of a
+// specific language or locale. All language tag values are guaranteed to be
+// well-formed.
+type Tag struct {
+ // NOTE: exported tags will become part of the public API.
+ language ID
+ locale ID
+ full fullTag // always a language.Tag for now.
+}
+
+const _und = 0
+
+type fullTag interface {
+ IsRoot() bool
+ Parent() language.Tag
+}
+
+// Make a compact Tag from a fully specified internal language Tag.
+func Make(t language.Tag) (tag Tag) {
+ if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" {
+ if r, err := language.ParseRegion(region[:2]); err == nil {
+ tFull := t
+ t, _ = t.SetTypeForKey("rg", "")
+ // TODO: should we not consider "va" for the language tag?
+ var exact1, exact2 bool
+ tag.language, exact1 = FromTag(t)
+ t.RegionID = r
+ tag.locale, exact2 = FromTag(t)
+ if !exact1 || !exact2 {
+ tag.full = tFull
+ }
+ return tag
+ }
+ }
+ lang, ok := FromTag(t)
+ tag.language = lang
+ tag.locale = lang
+ if !ok {
+ tag.full = t
+ }
+ return tag
+}
+
+// Tag returns an internal language Tag version of this tag.
+func (t Tag) Tag() language.Tag {
+ if t.full != nil {
+ return t.full.(language.Tag)
+ }
+ tag := t.language.Tag()
+ if t.language != t.locale {
+ loc := t.locale.Tag()
+ tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz")
+ }
+ return tag
+}
+
+// IsCompact reports whether this tag is fully defined in terms of ID.
+func (t *Tag) IsCompact() bool {
+ return t.full == nil
+}
+
+// MayHaveVariants reports whether a tag may have variants. If it returns false
+// it is guaranteed the tag does not have variants.
+func (t Tag) MayHaveVariants() bool {
+ return t.full != nil || int(t.language) >= len(coreTags)
+}
+
+// MayHaveExtensions reports whether a tag may have extensions. If it returns
+// false it is guaranteed the tag does not have them.
+func (t Tag) MayHaveExtensions() bool {
+ return t.full != nil ||
+ int(t.language) >= len(coreTags) ||
+ t.language != t.locale
+}
+
+// IsRoot returns true if t is equal to language "und".
+func (t Tag) IsRoot() bool {
+ if t.full != nil {
+ return t.full.IsRoot()
+ }
+ return t.language == _und
+}
+
+// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
+// specific language are substituted with fields from the parent language.
+// The parent for a language may change for newer versions of CLDR.
+func (t Tag) Parent() Tag {
+ if t.full != nil {
+ return Make(t.full.Parent())
+ }
+ if t.language != t.locale {
+ // Simulate stripping -u-rg-xxxxxx
+ return Tag{language: t.language, locale: t.language}
+ }
+ // TODO: use parent lookup table once cycle from internal package is
+ // removed. Probably by internalizing the table and declaring this fast
+ // enough.
+ // lang := compactID(internal.Parent(uint16(t.language)))
+ lang, _ := FromTag(t.language.Tag().Parent())
+ return Tag{language: lang, locale: lang}
+}
+
+// nextToken returns token t and the rest of the string.
+func nextToken(s string) (t, tail string) {
+ p := strings.Index(s[1:], "-")
+ if p == -1 {
+ return s[1:], ""
+ }
+ p++
+ return s[1:p], s[p:]
+}
+
+// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags
+// for which data exists in the text repository. The index will change over time
+// and should not be stored in persistent storage. If t does not match a compact
+// index, exact will be false and the compact index will be returned for the
+// first match after repeatedly taking the Parent of t.
+func LanguageID(t Tag) (id ID, exact bool) {
+ return t.language, t.full == nil
+}
+
+// RegionalID returns the ID for the regional variant of this tag. This index is
+// used to indicate region-specific overrides, such as default currency, default
+// calendar and week data, default time cycle, and default measurement system
+// and unit preferences.
+//
+// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US
+// settings for currency, number formatting, etc. The CompactIndex for this tag
+// will be that for en-GB, while the RegionalID will be the one corresponding to
+// en-US.
+func RegionalID(t Tag) (id ID, exact bool) {
+ return t.locale, t.full == nil
+}
+
+// LanguageTag returns t stripped of regional variant indicators.
+//
+// At the moment this means it is stripped of the regional subtag "rg" and the
+// variant subtag "va" in the "u" extension.
+func (t Tag) LanguageTag() Tag {
+ if t.full == nil {
+ return Tag{language: t.language, locale: t.language}
+ }
+ tt := t.Tag()
+ tt, _ = tt.SetTypeForKey("rg", "")
+ tt, _ = tt.SetTypeForKey("va", "")
+ return Make(tt)
+}
+
+// RegionalTag returns the regional variant of the tag.
+//
+// At the moment this means that the region is set from the regional subtag
+// "rg" in the "u" extension.
+func (t Tag) RegionalTag() Tag {
+ rt := Tag{language: t.locale, locale: t.locale}
+ if t.full == nil {
+ return rt
+ }
+ b := language.Builder{}
+ tag := t.Tag()
+ // tag, _ = tag.SetTypeForKey("rg", "")
+ b.SetTag(t.locale.Tag())
+ if v := tag.Variants(); v != "" {
+ for _, v := range strings.Split(v, "-") {
+ b.AddVariant(v)
+ }
+ }
+ for _, e := range tag.Extensions() {
+ b.AddExt(e)
+ }
+ return Make(b.Make())
+}
+
+// FromTag reports the closest matching ID for an internal language Tag.
+func FromTag(t language.Tag) (id ID, exact bool) {
+ // TODO: perhaps give more frequent tags a lower index.
+// TODO: we could make the indexes stable. This would exclude some
+ // possibilities for optimization, so don't do this quite yet.
+ exact = true
+
+ b, s, r := t.Raw()
+ if t.HasString() {
+ if t.IsPrivateUse() {
+ // We have no entries for user-defined tags.
+ return 0, false
+ }
+ hasExtra := false
+ if t.HasVariants() {
+ if t.HasExtensions() {
+ build := language.Builder{}
+ build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r})
+ build.AddVariant(t.Variants())
+ exact = false
+ t = build.Make()
+ }
+ hasExtra = true
+ } else if _, ok := t.Extension('u'); ok {
+ // TODO: va may mean something else. Consider not considering it.
+ // Strip all but the 'va' entry.
+ old := t
+ variant := t.TypeForKey("va")
+ t = language.Tag{LangID: b, ScriptID: s, RegionID: r}
+ if variant != "" {
+ t, _ = t.SetTypeForKey("va", variant)
+ hasExtra = true
+ }
+ exact = old == t
+ } else {
+ exact = false
+ }
+ if hasExtra {
+ // We have some variants.
+ for i, s := range specialTags {
+ if s == t {
+ return ID(i + len(coreTags)), exact
+ }
+ }
+ exact = false
+ }
+ }
+ if x, ok := getCoreIndex(t); ok {
+ return x, exact
+ }
+ exact = false
+ if r != 0 && s == 0 {
+ // Deal with cases where an extra script is inserted for the region.
+ t, _ := t.Maximize()
+ if x, ok := getCoreIndex(t); ok {
+ return x, exact
+ }
+ }
+ for t = t.Parent(); t != root; t = t.Parent() {
+ // No variants specified: just compare core components.
+ // The key has the form lllssrrr, where l, s, and r are nibbles of the
+ // langID, scriptID, and regionID, respectively.
+ if x, ok := getCoreIndex(t); ok {
+ return x, exact
+ }
+ }
+ return 0, exact
+}
+
+var root = language.Tag{}
diff --git a/vendor/golang.org/x/text/internal/language/compact/parents.go b/vendor/golang.org/x/text/internal/language/compact/parents.go
new file mode 100644
index 000000000..8d810723c
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/parents.go
@@ -0,0 +1,120 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package compact
+
+// parents maps a compact index of a tag to the compact index of the parent of
+// this tag.
+var parents = []ID{ // 775 elements
+ // Entry 0 - 3F
+ 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006,
+ 0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
+ 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
+ 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a,
+ 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000,
+ 0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000,
+ 0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000,
+ 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e,
+ // Entry 40 - 7F
+ 0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046,
+ 0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000,
+ 0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000,
+ 0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d,
+ 0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066,
+ 0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b,
+ 0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000,
+ 0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e,
+ // Entry 80 - BF
+ 0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086,
+ 0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087,
+ 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088,
+ 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087,
+ 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086,
+ // Entry C0 - FF
+ 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087,
+ 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087,
+ 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087,
+ 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000,
+ 0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2,
+ 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1,
+ // Entry 100 - 13F
+ 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1,
+ 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e,
+ 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000,
+ 0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e,
+ 0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123,
+ 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
+ 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
+ 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
+ // Entry 140 - 17F
+ 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
+ 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123,
+ 0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156,
+ 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c,
+ 0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000,
+ 0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000,
+ 0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176,
+ 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e,
+ // Entry 180 - 1BF
+ 0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184,
+ 0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e,
+ 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000,
+ 0x0197, 0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000,
+ 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000,
+ 0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000,
+ 0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6,
+ 0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000,
+ // Entry 1C0 - 1FF
+ 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000,
+ 0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb,
+ 0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000,
+ 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000,
+ 0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6,
+ 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee,
+ 0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5,
+ 0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000,
+ // Entry 200 - 23F
+ 0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000,
+ 0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000,
+ 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000,
+ 0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226,
+ 0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000,
+ 0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236,
+ 0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244,
+ // Entry 240 - 27F
+ 0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000,
+ 0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000,
+ 0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254,
+ 0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000,
+ 0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000,
+ 0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e,
+ 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273,
+ 0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000,
+ // Entry 280 - 2BF
+ 0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286,
+ 0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000,
+ 0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295,
+ 0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d,
+ 0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000,
+ 0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae,
+ 0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5,
+ 0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000,
+ // Entry 2C0 - 2FF
+ 0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000,
+ 0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd,
+ 0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000,
+ 0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000,
+ 0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6,
+ 0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000,
+ 0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000,
+ 0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000,
+ // Entry 300 - 33F
+ 0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6,
+} // Size: 1574 bytes
+
+// Total table size 1574 bytes (1KiB); checksum: 895AAF0B
diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go
new file mode 100644
index 000000000..a09ed198a
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/tables.go
@@ -0,0 +1,1015 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package compact
+
+import "golang.org/x/text/internal/language"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+// NumCompactTags is the number of common tags. The maximum tag is
+// NumCompactTags-1.
+const NumCompactTags = 775
+const (
+ undIndex ID = 0
+ afIndex ID = 1
+ afNAIndex ID = 2
+ afZAIndex ID = 3
+ agqIndex ID = 4
+ agqCMIndex ID = 5
+ akIndex ID = 6
+ akGHIndex ID = 7
+ amIndex ID = 8
+ amETIndex ID = 9
+ arIndex ID = 10
+ ar001Index ID = 11
+ arAEIndex ID = 12
+ arBHIndex ID = 13
+ arDJIndex ID = 14
+ arDZIndex ID = 15
+ arEGIndex ID = 16
+ arEHIndex ID = 17
+ arERIndex ID = 18
+ arILIndex ID = 19
+ arIQIndex ID = 20
+ arJOIndex ID = 21
+ arKMIndex ID = 22
+ arKWIndex ID = 23
+ arLBIndex ID = 24
+ arLYIndex ID = 25
+ arMAIndex ID = 26
+ arMRIndex ID = 27
+ arOMIndex ID = 28
+ arPSIndex ID = 29
+ arQAIndex ID = 30
+ arSAIndex ID = 31
+ arSDIndex ID = 32
+ arSOIndex ID = 33
+ arSSIndex ID = 34
+ arSYIndex ID = 35
+ arTDIndex ID = 36
+ arTNIndex ID = 37
+ arYEIndex ID = 38
+ arsIndex ID = 39
+ asIndex ID = 40
+ asINIndex ID = 41
+ asaIndex ID = 42
+ asaTZIndex ID = 43
+ astIndex ID = 44
+ astESIndex ID = 45
+ azIndex ID = 46
+ azCyrlIndex ID = 47
+ azCyrlAZIndex ID = 48
+ azLatnIndex ID = 49
+ azLatnAZIndex ID = 50
+ basIndex ID = 51
+ basCMIndex ID = 52
+ beIndex ID = 53
+ beBYIndex ID = 54
+ bemIndex ID = 55
+ bemZMIndex ID = 56
+ bezIndex ID = 57
+ bezTZIndex ID = 58
+ bgIndex ID = 59
+ bgBGIndex ID = 60
+ bhIndex ID = 61
+ bmIndex ID = 62
+ bmMLIndex ID = 63
+ bnIndex ID = 64
+ bnBDIndex ID = 65
+ bnINIndex ID = 66
+ boIndex ID = 67
+ boCNIndex ID = 68
+ boINIndex ID = 69
+ brIndex ID = 70
+ brFRIndex ID = 71
+ brxIndex ID = 72
+ brxINIndex ID = 73
+ bsIndex ID = 74
+ bsCyrlIndex ID = 75
+ bsCyrlBAIndex ID = 76
+ bsLatnIndex ID = 77
+ bsLatnBAIndex ID = 78
+ caIndex ID = 79
+ caADIndex ID = 80
+ caESIndex ID = 81
+ caFRIndex ID = 82
+ caITIndex ID = 83
+ ccpIndex ID = 84
+ ccpBDIndex ID = 85
+ ccpINIndex ID = 86
+ ceIndex ID = 87
+ ceRUIndex ID = 88
+ cggIndex ID = 89
+ cggUGIndex ID = 90
+ chrIndex ID = 91
+ chrUSIndex ID = 92
+ ckbIndex ID = 93
+ ckbIQIndex ID = 94
+ ckbIRIndex ID = 95
+ csIndex ID = 96
+ csCZIndex ID = 97
+ cuIndex ID = 98
+ cuRUIndex ID = 99
+ cyIndex ID = 100
+ cyGBIndex ID = 101
+ daIndex ID = 102
+ daDKIndex ID = 103
+ daGLIndex ID = 104
+ davIndex ID = 105
+ davKEIndex ID = 106
+ deIndex ID = 107
+ deATIndex ID = 108
+ deBEIndex ID = 109
+ deCHIndex ID = 110
+ deDEIndex ID = 111
+ deITIndex ID = 112
+ deLIIndex ID = 113
+ deLUIndex ID = 114
+ djeIndex ID = 115
+ djeNEIndex ID = 116
+ dsbIndex ID = 117
+ dsbDEIndex ID = 118
+ duaIndex ID = 119
+ duaCMIndex ID = 120
+ dvIndex ID = 121
+ dyoIndex ID = 122
+ dyoSNIndex ID = 123
+ dzIndex ID = 124
+ dzBTIndex ID = 125
+ ebuIndex ID = 126
+ ebuKEIndex ID = 127
+ eeIndex ID = 128
+ eeGHIndex ID = 129
+ eeTGIndex ID = 130
+ elIndex ID = 131
+ elCYIndex ID = 132
+ elGRIndex ID = 133
+ enIndex ID = 134
+ en001Index ID = 135
+ en150Index ID = 136
+ enAGIndex ID = 137
+ enAIIndex ID = 138
+ enASIndex ID = 139
+ enATIndex ID = 140
+ enAUIndex ID = 141
+ enBBIndex ID = 142
+ enBEIndex ID = 143
+ enBIIndex ID = 144
+ enBMIndex ID = 145
+ enBSIndex ID = 146
+ enBWIndex ID = 147
+ enBZIndex ID = 148
+ enCAIndex ID = 149
+ enCCIndex ID = 150
+ enCHIndex ID = 151
+ enCKIndex ID = 152
+ enCMIndex ID = 153
+ enCXIndex ID = 154
+ enCYIndex ID = 155
+ enDEIndex ID = 156
+ enDGIndex ID = 157
+ enDKIndex ID = 158
+ enDMIndex ID = 159
+ enERIndex ID = 160
+ enFIIndex ID = 161
+ enFJIndex ID = 162
+ enFKIndex ID = 163
+ enFMIndex ID = 164
+ enGBIndex ID = 165
+ enGDIndex ID = 166
+ enGGIndex ID = 167
+ enGHIndex ID = 168
+ enGIIndex ID = 169
+ enGMIndex ID = 170
+ enGUIndex ID = 171
+ enGYIndex ID = 172
+ enHKIndex ID = 173
+ enIEIndex ID = 174
+ enILIndex ID = 175
+ enIMIndex ID = 176
+ enINIndex ID = 177
+ enIOIndex ID = 178
+ enJEIndex ID = 179
+ enJMIndex ID = 180
+ enKEIndex ID = 181
+ enKIIndex ID = 182
+ enKNIndex ID = 183
+ enKYIndex ID = 184
+ enLCIndex ID = 185
+ enLRIndex ID = 186
+ enLSIndex ID = 187
+ enMGIndex ID = 188
+ enMHIndex ID = 189
+ enMOIndex ID = 190
+ enMPIndex ID = 191
+ enMSIndex ID = 192
+ enMTIndex ID = 193
+ enMUIndex ID = 194
+ enMWIndex ID = 195
+ enMYIndex ID = 196
+ enNAIndex ID = 197
+ enNFIndex ID = 198
+ enNGIndex ID = 199
+ enNLIndex ID = 200
+ enNRIndex ID = 201
+ enNUIndex ID = 202
+ enNZIndex ID = 203
+ enPGIndex ID = 204
+ enPHIndex ID = 205
+ enPKIndex ID = 206
+ enPNIndex ID = 207
+ enPRIndex ID = 208
+ enPWIndex ID = 209
+ enRWIndex ID = 210
+ enSBIndex ID = 211
+ enSCIndex ID = 212
+ enSDIndex ID = 213
+ enSEIndex ID = 214
+ enSGIndex ID = 215
+ enSHIndex ID = 216
+ enSIIndex ID = 217
+ enSLIndex ID = 218
+ enSSIndex ID = 219
+ enSXIndex ID = 220
+ enSZIndex ID = 221
+ enTCIndex ID = 222
+ enTKIndex ID = 223
+ enTOIndex ID = 224
+ enTTIndex ID = 225
+ enTVIndex ID = 226
+ enTZIndex ID = 227
+ enUGIndex ID = 228
+ enUMIndex ID = 229
+ enUSIndex ID = 230
+ enVCIndex ID = 231
+ enVGIndex ID = 232
+ enVIIndex ID = 233
+ enVUIndex ID = 234
+ enWSIndex ID = 235
+ enZAIndex ID = 236
+ enZMIndex ID = 237
+ enZWIndex ID = 238
+ eoIndex ID = 239
+ eo001Index ID = 240
+ esIndex ID = 241
+ es419Index ID = 242
+ esARIndex ID = 243
+ esBOIndex ID = 244
+ esBRIndex ID = 245
+ esBZIndex ID = 246
+ esCLIndex ID = 247
+ esCOIndex ID = 248
+ esCRIndex ID = 249
+ esCUIndex ID = 250
+ esDOIndex ID = 251
+ esEAIndex ID = 252
+ esECIndex ID = 253
+ esESIndex ID = 254
+ esGQIndex ID = 255
+ esGTIndex ID = 256
+ esHNIndex ID = 257
+ esICIndex ID = 258
+ esMXIndex ID = 259
+ esNIIndex ID = 260
+ esPAIndex ID = 261
+ esPEIndex ID = 262
+ esPHIndex ID = 263
+ esPRIndex ID = 264
+ esPYIndex ID = 265
+ esSVIndex ID = 266
+ esUSIndex ID = 267
+ esUYIndex ID = 268
+ esVEIndex ID = 269
+ etIndex ID = 270
+ etEEIndex ID = 271
+ euIndex ID = 272
+ euESIndex ID = 273
+ ewoIndex ID = 274
+ ewoCMIndex ID = 275
+ faIndex ID = 276
+ faAFIndex ID = 277
+ faIRIndex ID = 278
+ ffIndex ID = 279
+ ffCMIndex ID = 280
+ ffGNIndex ID = 281
+ ffMRIndex ID = 282
+ ffSNIndex ID = 283
+ fiIndex ID = 284
+ fiFIIndex ID = 285
+ filIndex ID = 286
+ filPHIndex ID = 287
+ foIndex ID = 288
+ foDKIndex ID = 289
+ foFOIndex ID = 290
+ frIndex ID = 291
+ frBEIndex ID = 292
+ frBFIndex ID = 293
+ frBIIndex ID = 294
+ frBJIndex ID = 295
+ frBLIndex ID = 296
+ frCAIndex ID = 297
+ frCDIndex ID = 298
+ frCFIndex ID = 299
+ frCGIndex ID = 300
+ frCHIndex ID = 301
+ frCIIndex ID = 302
+ frCMIndex ID = 303
+ frDJIndex ID = 304
+ frDZIndex ID = 305
+ frFRIndex ID = 306
+ frGAIndex ID = 307
+ frGFIndex ID = 308
+ frGNIndex ID = 309
+ frGPIndex ID = 310
+ frGQIndex ID = 311
+ frHTIndex ID = 312
+ frKMIndex ID = 313
+ frLUIndex ID = 314
+ frMAIndex ID = 315
+ frMCIndex ID = 316
+ frMFIndex ID = 317
+ frMGIndex ID = 318
+ frMLIndex ID = 319
+ frMQIndex ID = 320
+ frMRIndex ID = 321
+ frMUIndex ID = 322
+ frNCIndex ID = 323
+ frNEIndex ID = 324
+ frPFIndex ID = 325
+ frPMIndex ID = 326
+ frREIndex ID = 327
+ frRWIndex ID = 328
+ frSCIndex ID = 329
+ frSNIndex ID = 330
+ frSYIndex ID = 331
+ frTDIndex ID = 332
+ frTGIndex ID = 333
+ frTNIndex ID = 334
+ frVUIndex ID = 335
+ frWFIndex ID = 336
+ frYTIndex ID = 337
+ furIndex ID = 338
+ furITIndex ID = 339
+ fyIndex ID = 340
+ fyNLIndex ID = 341
+ gaIndex ID = 342
+ gaIEIndex ID = 343
+ gdIndex ID = 344
+ gdGBIndex ID = 345
+ glIndex ID = 346
+ glESIndex ID = 347
+ gswIndex ID = 348
+ gswCHIndex ID = 349
+ gswFRIndex ID = 350
+ gswLIIndex ID = 351
+ guIndex ID = 352
+ guINIndex ID = 353
+ guwIndex ID = 354
+ guzIndex ID = 355
+ guzKEIndex ID = 356
+ gvIndex ID = 357
+ gvIMIndex ID = 358
+ haIndex ID = 359
+ haGHIndex ID = 360
+ haNEIndex ID = 361
+ haNGIndex ID = 362
+ hawIndex ID = 363
+ hawUSIndex ID = 364
+ heIndex ID = 365
+ heILIndex ID = 366
+ hiIndex ID = 367
+ hiINIndex ID = 368
+ hrIndex ID = 369
+ hrBAIndex ID = 370
+ hrHRIndex ID = 371
+ hsbIndex ID = 372
+ hsbDEIndex ID = 373
+ huIndex ID = 374
+ huHUIndex ID = 375
+ hyIndex ID = 376
+ hyAMIndex ID = 377
+ idIndex ID = 378
+ idIDIndex ID = 379
+ igIndex ID = 380
+ igNGIndex ID = 381
+ iiIndex ID = 382
+ iiCNIndex ID = 383
+ inIndex ID = 384
+ ioIndex ID = 385
+ isIndex ID = 386
+ isISIndex ID = 387
+ itIndex ID = 388
+ itCHIndex ID = 389
+ itITIndex ID = 390
+ itSMIndex ID = 391
+ itVAIndex ID = 392
+ iuIndex ID = 393
+ iwIndex ID = 394
+ jaIndex ID = 395
+ jaJPIndex ID = 396
+ jboIndex ID = 397
+ jgoIndex ID = 398
+ jgoCMIndex ID = 399
+ jiIndex ID = 400
+ jmcIndex ID = 401
+ jmcTZIndex ID = 402
+ jvIndex ID = 403
+ jwIndex ID = 404
+ kaIndex ID = 405
+ kaGEIndex ID = 406
+ kabIndex ID = 407
+ kabDZIndex ID = 408
+ kajIndex ID = 409
+ kamIndex ID = 410
+ kamKEIndex ID = 411
+ kcgIndex ID = 412
+ kdeIndex ID = 413
+ kdeTZIndex ID = 414
+ keaIndex ID = 415
+ keaCVIndex ID = 416
+ khqIndex ID = 417
+ khqMLIndex ID = 418
+ kiIndex ID = 419
+ kiKEIndex ID = 420
+ kkIndex ID = 421
+ kkKZIndex ID = 422
+ kkjIndex ID = 423
+ kkjCMIndex ID = 424
+ klIndex ID = 425
+ klGLIndex ID = 426
+ klnIndex ID = 427
+ klnKEIndex ID = 428
+ kmIndex ID = 429
+ kmKHIndex ID = 430
+ knIndex ID = 431
+ knINIndex ID = 432
+ koIndex ID = 433
+ koKPIndex ID = 434
+ koKRIndex ID = 435
+ kokIndex ID = 436
+ kokINIndex ID = 437
+ ksIndex ID = 438
+ ksINIndex ID = 439
+ ksbIndex ID = 440
+ ksbTZIndex ID = 441
+ ksfIndex ID = 442
+ ksfCMIndex ID = 443
+ kshIndex ID = 444
+ kshDEIndex ID = 445
+ kuIndex ID = 446
+ kwIndex ID = 447
+ kwGBIndex ID = 448
+ kyIndex ID = 449
+ kyKGIndex ID = 450
+ lagIndex ID = 451
+ lagTZIndex ID = 452
+ lbIndex ID = 453
+ lbLUIndex ID = 454
+ lgIndex ID = 455
+ lgUGIndex ID = 456
+ lktIndex ID = 457
+ lktUSIndex ID = 458
+ lnIndex ID = 459
+ lnAOIndex ID = 460
+ lnCDIndex ID = 461
+ lnCFIndex ID = 462
+ lnCGIndex ID = 463
+ loIndex ID = 464
+ loLAIndex ID = 465
+ lrcIndex ID = 466
+ lrcIQIndex ID = 467
+ lrcIRIndex ID = 468
+ ltIndex ID = 469
+ ltLTIndex ID = 470
+ luIndex ID = 471
+ luCDIndex ID = 472
+ luoIndex ID = 473
+ luoKEIndex ID = 474
+ luyIndex ID = 475
+ luyKEIndex ID = 476
+ lvIndex ID = 477
+ lvLVIndex ID = 478
+ masIndex ID = 479
+ masKEIndex ID = 480
+ masTZIndex ID = 481
+ merIndex ID = 482
+ merKEIndex ID = 483
+ mfeIndex ID = 484
+ mfeMUIndex ID = 485
+ mgIndex ID = 486
+ mgMGIndex ID = 487
+ mghIndex ID = 488
+ mghMZIndex ID = 489
+ mgoIndex ID = 490
+ mgoCMIndex ID = 491
+ mkIndex ID = 492
+ mkMKIndex ID = 493
+ mlIndex ID = 494
+ mlINIndex ID = 495
+ mnIndex ID = 496
+ mnMNIndex ID = 497
+ moIndex ID = 498
+ mrIndex ID = 499
+ mrINIndex ID = 500
+ msIndex ID = 501
+ msBNIndex ID = 502
+ msMYIndex ID = 503
+ msSGIndex ID = 504
+ mtIndex ID = 505
+ mtMTIndex ID = 506
+ muaIndex ID = 507
+ muaCMIndex ID = 508
+ myIndex ID = 509
+ myMMIndex ID = 510
+ mznIndex ID = 511
+ mznIRIndex ID = 512
+ nahIndex ID = 513
+ naqIndex ID = 514
+ naqNAIndex ID = 515
+ nbIndex ID = 516
+ nbNOIndex ID = 517
+ nbSJIndex ID = 518
+ ndIndex ID = 519
+ ndZWIndex ID = 520
+ ndsIndex ID = 521
+ ndsDEIndex ID = 522
+ ndsNLIndex ID = 523
+ neIndex ID = 524
+ neINIndex ID = 525
+ neNPIndex ID = 526
+ nlIndex ID = 527
+ nlAWIndex ID = 528
+ nlBEIndex ID = 529
+ nlBQIndex ID = 530
+ nlCWIndex ID = 531
+ nlNLIndex ID = 532
+ nlSRIndex ID = 533
+ nlSXIndex ID = 534
+ nmgIndex ID = 535
+ nmgCMIndex ID = 536
+ nnIndex ID = 537
+ nnNOIndex ID = 538
+ nnhIndex ID = 539
+ nnhCMIndex ID = 540
+ noIndex ID = 541
+ nqoIndex ID = 542
+ nrIndex ID = 543
+ nsoIndex ID = 544
+ nusIndex ID = 545
+ nusSSIndex ID = 546
+ nyIndex ID = 547
+ nynIndex ID = 548
+ nynUGIndex ID = 549
+ omIndex ID = 550
+ omETIndex ID = 551
+ omKEIndex ID = 552
+ orIndex ID = 553
+ orINIndex ID = 554
+ osIndex ID = 555
+ osGEIndex ID = 556
+ osRUIndex ID = 557
+ paIndex ID = 558
+ paArabIndex ID = 559
+ paArabPKIndex ID = 560
+ paGuruIndex ID = 561
+ paGuruINIndex ID = 562
+ papIndex ID = 563
+ plIndex ID = 564
+ plPLIndex ID = 565
+ prgIndex ID = 566
+ prg001Index ID = 567
+ psIndex ID = 568
+ psAFIndex ID = 569
+ ptIndex ID = 570
+ ptAOIndex ID = 571
+ ptBRIndex ID = 572
+ ptCHIndex ID = 573
+ ptCVIndex ID = 574
+ ptGQIndex ID = 575
+ ptGWIndex ID = 576
+ ptLUIndex ID = 577
+ ptMOIndex ID = 578
+ ptMZIndex ID = 579
+ ptPTIndex ID = 580
+ ptSTIndex ID = 581
+ ptTLIndex ID = 582
+ quIndex ID = 583
+ quBOIndex ID = 584
+ quECIndex ID = 585
+ quPEIndex ID = 586
+ rmIndex ID = 587
+ rmCHIndex ID = 588
+ rnIndex ID = 589
+ rnBIIndex ID = 590
+ roIndex ID = 591
+ roMDIndex ID = 592
+ roROIndex ID = 593
+ rofIndex ID = 594
+ rofTZIndex ID = 595
+ ruIndex ID = 596
+ ruBYIndex ID = 597
+ ruKGIndex ID = 598
+ ruKZIndex ID = 599
+ ruMDIndex ID = 600
+ ruRUIndex ID = 601
+ ruUAIndex ID = 602
+ rwIndex ID = 603
+ rwRWIndex ID = 604
+ rwkIndex ID = 605
+ rwkTZIndex ID = 606
+ sahIndex ID = 607
+ sahRUIndex ID = 608
+ saqIndex ID = 609
+ saqKEIndex ID = 610
+ sbpIndex ID = 611
+ sbpTZIndex ID = 612
+ sdIndex ID = 613
+ sdPKIndex ID = 614
+ sdhIndex ID = 615
+ seIndex ID = 616
+ seFIIndex ID = 617
+ seNOIndex ID = 618
+ seSEIndex ID = 619
+ sehIndex ID = 620
+ sehMZIndex ID = 621
+ sesIndex ID = 622
+ sesMLIndex ID = 623
+ sgIndex ID = 624
+ sgCFIndex ID = 625
+ shIndex ID = 626
+ shiIndex ID = 627
+ shiLatnIndex ID = 628
+ shiLatnMAIndex ID = 629
+ shiTfngIndex ID = 630
+ shiTfngMAIndex ID = 631
+ siIndex ID = 632
+ siLKIndex ID = 633
+ skIndex ID = 634
+ skSKIndex ID = 635
+ slIndex ID = 636
+ slSIIndex ID = 637
+ smaIndex ID = 638
+ smiIndex ID = 639
+ smjIndex ID = 640
+ smnIndex ID = 641
+ smnFIIndex ID = 642
+ smsIndex ID = 643
+ snIndex ID = 644
+ snZWIndex ID = 645
+ soIndex ID = 646
+ soDJIndex ID = 647
+ soETIndex ID = 648
+ soKEIndex ID = 649
+ soSOIndex ID = 650
+ sqIndex ID = 651
+ sqALIndex ID = 652
+ sqMKIndex ID = 653
+ sqXKIndex ID = 654
+ srIndex ID = 655
+ srCyrlIndex ID = 656
+ srCyrlBAIndex ID = 657
+ srCyrlMEIndex ID = 658
+ srCyrlRSIndex ID = 659
+ srCyrlXKIndex ID = 660
+ srLatnIndex ID = 661
+ srLatnBAIndex ID = 662
+ srLatnMEIndex ID = 663
+ srLatnRSIndex ID = 664
+ srLatnXKIndex ID = 665
+ ssIndex ID = 666
+ ssyIndex ID = 667
+ stIndex ID = 668
+ svIndex ID = 669
+ svAXIndex ID = 670
+ svFIIndex ID = 671
+ svSEIndex ID = 672
+ swIndex ID = 673
+ swCDIndex ID = 674
+ swKEIndex ID = 675
+ swTZIndex ID = 676
+ swUGIndex ID = 677
+ syrIndex ID = 678
+ taIndex ID = 679
+ taINIndex ID = 680
+ taLKIndex ID = 681
+ taMYIndex ID = 682
+ taSGIndex ID = 683
+ teIndex ID = 684
+ teINIndex ID = 685
+ teoIndex ID = 686
+ teoKEIndex ID = 687
+ teoUGIndex ID = 688
+ tgIndex ID = 689
+ tgTJIndex ID = 690
+ thIndex ID = 691
+ thTHIndex ID = 692
+ tiIndex ID = 693
+ tiERIndex ID = 694
+ tiETIndex ID = 695
+ tigIndex ID = 696
+ tkIndex ID = 697
+ tkTMIndex ID = 698
+ tlIndex ID = 699
+ tnIndex ID = 700
+ toIndex ID = 701
+ toTOIndex ID = 702
+ trIndex ID = 703
+ trCYIndex ID = 704
+ trTRIndex ID = 705
+ tsIndex ID = 706
+ ttIndex ID = 707
+ ttRUIndex ID = 708
+ twqIndex ID = 709
+ twqNEIndex ID = 710
+ tzmIndex ID = 711
+ tzmMAIndex ID = 712
+ ugIndex ID = 713
+ ugCNIndex ID = 714
+ ukIndex ID = 715
+ ukUAIndex ID = 716
+ urIndex ID = 717
+ urINIndex ID = 718
+ urPKIndex ID = 719
+ uzIndex ID = 720
+ uzArabIndex ID = 721
+ uzArabAFIndex ID = 722
+ uzCyrlIndex ID = 723
+ uzCyrlUZIndex ID = 724
+ uzLatnIndex ID = 725
+ uzLatnUZIndex ID = 726
+ vaiIndex ID = 727
+ vaiLatnIndex ID = 728
+ vaiLatnLRIndex ID = 729
+ vaiVaiiIndex ID = 730
+ vaiVaiiLRIndex ID = 731
+ veIndex ID = 732
+ viIndex ID = 733
+ viVNIndex ID = 734
+ voIndex ID = 735
+ vo001Index ID = 736
+ vunIndex ID = 737
+ vunTZIndex ID = 738
+ waIndex ID = 739
+ waeIndex ID = 740
+ waeCHIndex ID = 741
+ woIndex ID = 742
+ woSNIndex ID = 743
+ xhIndex ID = 744
+ xogIndex ID = 745
+ xogUGIndex ID = 746
+ yavIndex ID = 747
+ yavCMIndex ID = 748
+ yiIndex ID = 749
+ yi001Index ID = 750
+ yoIndex ID = 751
+ yoBJIndex ID = 752
+ yoNGIndex ID = 753
+ yueIndex ID = 754
+ yueHansIndex ID = 755
+ yueHansCNIndex ID = 756
+ yueHantIndex ID = 757
+ yueHantHKIndex ID = 758
+ zghIndex ID = 759
+ zghMAIndex ID = 760
+ zhIndex ID = 761
+ zhHansIndex ID = 762
+ zhHansCNIndex ID = 763
+ zhHansHKIndex ID = 764
+ zhHansMOIndex ID = 765
+ zhHansSGIndex ID = 766
+ zhHantIndex ID = 767
+ zhHantHKIndex ID = 768
+ zhHantMOIndex ID = 769
+ zhHantTWIndex ID = 770
+ zuIndex ID = 771
+ zuZAIndex ID = 772
+ caESvalenciaIndex ID = 773
+ enUSuvaposixIndex ID = 774
+)
+
+var coreTags = []language.CompactCoreInfo{ // 773 elements
+ // Entry 0 - 1F
+ 0x00000000, 0x01600000, 0x016000d3, 0x01600162,
+ 0x01c00000, 0x01c00052, 0x02100000, 0x02100081,
+ 0x02700000, 0x02700070, 0x03a00000, 0x03a00001,
+ 0x03a00023, 0x03a00039, 0x03a00063, 0x03a00068,
+ 0x03a0006c, 0x03a0006d, 0x03a0006e, 0x03a00098,
+ 0x03a0009c, 0x03a000a2, 0x03a000a9, 0x03a000ad,
+ 0x03a000b1, 0x03a000ba, 0x03a000bb, 0x03a000ca,
+ 0x03a000e2, 0x03a000ee, 0x03a000f4, 0x03a00109,
+ // Entry 20 - 3F
+ 0x03a0010c, 0x03a00116, 0x03a00118, 0x03a0011d,
+ 0x03a00121, 0x03a00129, 0x03a0015f, 0x04000000,
+ 0x04300000, 0x0430009a, 0x04400000, 0x04400130,
+ 0x04800000, 0x0480006f, 0x05800000, 0x05820000,
+ 0x05820032, 0x0585b000, 0x0585b032, 0x05e00000,
+ 0x05e00052, 0x07100000, 0x07100047, 0x07500000,
+ 0x07500163, 0x07900000, 0x07900130, 0x07e00000,
+ 0x07e00038, 0x08200000, 0x0a000000, 0x0a0000c4,
+ // Entry 40 - 5F
+ 0x0a500000, 0x0a500035, 0x0a50009a, 0x0a900000,
+ 0x0a900053, 0x0a90009a, 0x0b200000, 0x0b200079,
+ 0x0b500000, 0x0b50009a, 0x0b700000, 0x0b720000,
+ 0x0b720033, 0x0b75b000, 0x0b75b033, 0x0d700000,
+ 0x0d700022, 0x0d70006f, 0x0d700079, 0x0d70009f,
+ 0x0db00000, 0x0db00035, 0x0db0009a, 0x0dc00000,
+ 0x0dc00107, 0x0df00000, 0x0df00132, 0x0e500000,
+ 0x0e500136, 0x0e900000, 0x0e90009c, 0x0e90009d,
+ // Entry 60 - 7F
+ 0x0fa00000, 0x0fa0005f, 0x0fe00000, 0x0fe00107,
+ 0x10000000, 0x1000007c, 0x10100000, 0x10100064,
+ 0x10100083, 0x10800000, 0x108000a5, 0x10d00000,
+ 0x10d0002e, 0x10d00036, 0x10d0004e, 0x10d00061,
+ 0x10d0009f, 0x10d000b3, 0x10d000b8, 0x11700000,
+ 0x117000d5, 0x11f00000, 0x11f00061, 0x12400000,
+ 0x12400052, 0x12800000, 0x12b00000, 0x12b00115,
+ 0x12d00000, 0x12d00043, 0x12f00000, 0x12f000a5,
+ // Entry 80 - 9F
+ 0x13000000, 0x13000081, 0x13000123, 0x13600000,
+ 0x1360005e, 0x13600088, 0x13900000, 0x13900001,
+ 0x1390001a, 0x13900025, 0x13900026, 0x1390002d,
+ 0x1390002e, 0x1390002f, 0x13900034, 0x13900036,
+ 0x1390003a, 0x1390003d, 0x13900042, 0x13900046,
+ 0x13900048, 0x13900049, 0x1390004a, 0x1390004e,
+ 0x13900050, 0x13900052, 0x1390005d, 0x1390005e,
+ 0x13900061, 0x13900062, 0x13900064, 0x13900065,
+ // Entry A0 - BF
+ 0x1390006e, 0x13900073, 0x13900074, 0x13900075,
+ 0x13900076, 0x1390007c, 0x1390007d, 0x13900080,
+ 0x13900081, 0x13900082, 0x13900084, 0x1390008b,
+ 0x1390008d, 0x1390008e, 0x13900097, 0x13900098,
+ 0x13900099, 0x1390009a, 0x1390009b, 0x139000a0,
+ 0x139000a1, 0x139000a5, 0x139000a8, 0x139000aa,
+ 0x139000ae, 0x139000b2, 0x139000b5, 0x139000b6,
+ 0x139000c0, 0x139000c1, 0x139000c7, 0x139000c8,
+ // Entry C0 - DF
+ 0x139000cb, 0x139000cc, 0x139000cd, 0x139000cf,
+ 0x139000d1, 0x139000d3, 0x139000d6, 0x139000d7,
+ 0x139000da, 0x139000de, 0x139000e0, 0x139000e1,
+ 0x139000e7, 0x139000e8, 0x139000e9, 0x139000ec,
+ 0x139000ed, 0x139000f1, 0x13900108, 0x1390010a,
+ 0x1390010b, 0x1390010c, 0x1390010d, 0x1390010e,
+ 0x1390010f, 0x13900110, 0x13900113, 0x13900118,
+ 0x1390011c, 0x1390011e, 0x13900120, 0x13900126,
+ // Entry E0 - FF
+ 0x1390012a, 0x1390012d, 0x1390012e, 0x13900130,
+ 0x13900132, 0x13900134, 0x13900136, 0x1390013a,
+ 0x1390013d, 0x1390013e, 0x13900140, 0x13900143,
+ 0x13900162, 0x13900163, 0x13900165, 0x13c00000,
+ 0x13c00001, 0x13e00000, 0x13e0001f, 0x13e0002c,
+ 0x13e0003f, 0x13e00041, 0x13e00048, 0x13e00051,
+ 0x13e00054, 0x13e00057, 0x13e0005a, 0x13e00066,
+ 0x13e00069, 0x13e0006a, 0x13e0006f, 0x13e00087,
+ // Entry 100 - 11F
+ 0x13e0008a, 0x13e00090, 0x13e00095, 0x13e000d0,
+ 0x13e000d9, 0x13e000e3, 0x13e000e5, 0x13e000e8,
+ 0x13e000ed, 0x13e000f2, 0x13e0011b, 0x13e00136,
+ 0x13e00137, 0x13e0013c, 0x14000000, 0x1400006b,
+ 0x14500000, 0x1450006f, 0x14600000, 0x14600052,
+ 0x14800000, 0x14800024, 0x1480009d, 0x14e00000,
+ 0x14e00052, 0x14e00085, 0x14e000ca, 0x14e00115,
+ 0x15100000, 0x15100073, 0x15300000, 0x153000e8,
+ // Entry 120 - 13F
+ 0x15800000, 0x15800064, 0x15800077, 0x15e00000,
+ 0x15e00036, 0x15e00037, 0x15e0003a, 0x15e0003b,
+ 0x15e0003c, 0x15e00049, 0x15e0004b, 0x15e0004c,
+ 0x15e0004d, 0x15e0004e, 0x15e0004f, 0x15e00052,
+ 0x15e00063, 0x15e00068, 0x15e00079, 0x15e0007b,
+ 0x15e0007f, 0x15e00085, 0x15e00086, 0x15e00087,
+ 0x15e00092, 0x15e000a9, 0x15e000b8, 0x15e000bb,
+ 0x15e000bc, 0x15e000bf, 0x15e000c0, 0x15e000c4,
+ // Entry 140 - 15F
+ 0x15e000c9, 0x15e000ca, 0x15e000cd, 0x15e000d4,
+ 0x15e000d5, 0x15e000e6, 0x15e000eb, 0x15e00103,
+ 0x15e00108, 0x15e0010b, 0x15e00115, 0x15e0011d,
+ 0x15e00121, 0x15e00123, 0x15e00129, 0x15e00140,
+ 0x15e00141, 0x15e00160, 0x16900000, 0x1690009f,
+ 0x16d00000, 0x16d000da, 0x16e00000, 0x16e00097,
+ 0x17e00000, 0x17e0007c, 0x19000000, 0x1900006f,
+ 0x1a300000, 0x1a30004e, 0x1a300079, 0x1a3000b3,
+ // Entry 160 - 17F
+ 0x1a400000, 0x1a40009a, 0x1a900000, 0x1ab00000,
+ 0x1ab000a5, 0x1ac00000, 0x1ac00099, 0x1b400000,
+ 0x1b400081, 0x1b4000d5, 0x1b4000d7, 0x1b800000,
+ 0x1b800136, 0x1bc00000, 0x1bc00098, 0x1be00000,
+ 0x1be0009a, 0x1d100000, 0x1d100033, 0x1d100091,
+ 0x1d200000, 0x1d200061, 0x1d500000, 0x1d500093,
+ 0x1d700000, 0x1d700028, 0x1e100000, 0x1e100096,
+ 0x1e700000, 0x1e7000d7, 0x1ea00000, 0x1ea00053,
+ // Entry 180 - 19F
+ 0x1f300000, 0x1f500000, 0x1f800000, 0x1f80009e,
+ 0x1f900000, 0x1f90004e, 0x1f90009f, 0x1f900114,
+ 0x1f900139, 0x1fa00000, 0x1fb00000, 0x20000000,
+ 0x200000a3, 0x20300000, 0x20700000, 0x20700052,
+ 0x20800000, 0x20a00000, 0x20a00130, 0x20e00000,
+ 0x20f00000, 0x21000000, 0x2100007e, 0x21200000,
+ 0x21200068, 0x21600000, 0x21700000, 0x217000a5,
+ 0x21f00000, 0x22300000, 0x22300130, 0x22700000,
+ // Entry 1A0 - 1BF
+ 0x2270005b, 0x23400000, 0x234000c4, 0x23900000,
+ 0x239000a5, 0x24200000, 0x242000af, 0x24400000,
+ 0x24400052, 0x24500000, 0x24500083, 0x24600000,
+ 0x246000a5, 0x24a00000, 0x24a000a7, 0x25100000,
+ 0x2510009a, 0x25400000, 0x254000ab, 0x254000ac,
+ 0x25600000, 0x2560009a, 0x26a00000, 0x26a0009a,
+ 0x26b00000, 0x26b00130, 0x26d00000, 0x26d00052,
+ 0x26e00000, 0x26e00061, 0x27400000, 0x28100000,
+ // Entry 1C0 - 1DF
+ 0x2810007c, 0x28a00000, 0x28a000a6, 0x29100000,
+ 0x29100130, 0x29500000, 0x295000b8, 0x2a300000,
+ 0x2a300132, 0x2af00000, 0x2af00136, 0x2b500000,
+ 0x2b50002a, 0x2b50004b, 0x2b50004c, 0x2b50004d,
+ 0x2b800000, 0x2b8000b0, 0x2bf00000, 0x2bf0009c,
+ 0x2bf0009d, 0x2c000000, 0x2c0000b7, 0x2c200000,
+ 0x2c20004b, 0x2c400000, 0x2c4000a5, 0x2c500000,
+ 0x2c5000a5, 0x2c700000, 0x2c7000b9, 0x2d100000,
+ // Entry 1E0 - 1FF
+ 0x2d1000a5, 0x2d100130, 0x2e900000, 0x2e9000a5,
+ 0x2ed00000, 0x2ed000cd, 0x2f100000, 0x2f1000c0,
+ 0x2f200000, 0x2f2000d2, 0x2f400000, 0x2f400052,
+ 0x2ff00000, 0x2ff000c3, 0x30400000, 0x3040009a,
+ 0x30b00000, 0x30b000c6, 0x31000000, 0x31b00000,
+ 0x31b0009a, 0x31f00000, 0x31f0003e, 0x31f000d1,
+ 0x31f0010e, 0x32000000, 0x320000cc, 0x32500000,
+ 0x32500052, 0x33100000, 0x331000c5, 0x33a00000,
+ // Entry 200 - 21F
+ 0x33a0009d, 0x34100000, 0x34500000, 0x345000d3,
+ 0x34700000, 0x347000db, 0x34700111, 0x34e00000,
+ 0x34e00165, 0x35000000, 0x35000061, 0x350000da,
+ 0x35100000, 0x3510009a, 0x351000dc, 0x36700000,
+ 0x36700030, 0x36700036, 0x36700040, 0x3670005c,
+ 0x367000da, 0x36700117, 0x3670011c, 0x36800000,
+ 0x36800052, 0x36a00000, 0x36a000db, 0x36c00000,
+ 0x36c00052, 0x36f00000, 0x37500000, 0x37600000,
+ // Entry 220 - 23F
+ 0x37a00000, 0x38000000, 0x38000118, 0x38700000,
+ 0x38900000, 0x38900132, 0x39000000, 0x39000070,
+ 0x390000a5, 0x39500000, 0x3950009a, 0x39800000,
+ 0x3980007e, 0x39800107, 0x39d00000, 0x39d05000,
+ 0x39d050e9, 0x39d36000, 0x39d3609a, 0x3a100000,
+ 0x3b300000, 0x3b3000ea, 0x3bd00000, 0x3bd00001,
+ 0x3be00000, 0x3be00024, 0x3c000000, 0x3c00002a,
+ 0x3c000041, 0x3c00004e, 0x3c00005b, 0x3c000087,
+ // Entry 240 - 25F
+ 0x3c00008c, 0x3c0000b8, 0x3c0000c7, 0x3c0000d2,
+ 0x3c0000ef, 0x3c000119, 0x3c000127, 0x3c400000,
+ 0x3c40003f, 0x3c40006a, 0x3c4000e5, 0x3d400000,
+ 0x3d40004e, 0x3d900000, 0x3d90003a, 0x3dc00000,
+ 0x3dc000bd, 0x3dc00105, 0x3de00000, 0x3de00130,
+ 0x3e200000, 0x3e200047, 0x3e2000a6, 0x3e2000af,
+ 0x3e2000bd, 0x3e200107, 0x3e200131, 0x3e500000,
+ 0x3e500108, 0x3e600000, 0x3e600130, 0x3eb00000,
+ // Entry 260 - 27F
+ 0x3eb00107, 0x3ec00000, 0x3ec000a5, 0x3f300000,
+ 0x3f300130, 0x3fa00000, 0x3fa000e9, 0x3fc00000,
+ 0x3fd00000, 0x3fd00073, 0x3fd000db, 0x3fd0010d,
+ 0x3ff00000, 0x3ff000d2, 0x40100000, 0x401000c4,
+ 0x40200000, 0x4020004c, 0x40700000, 0x40800000,
+ 0x4085b000, 0x4085b0bb, 0x408eb000, 0x408eb0bb,
+ 0x40c00000, 0x40c000b4, 0x41200000, 0x41200112,
+ 0x41600000, 0x41600110, 0x41c00000, 0x41d00000,
+ // Entry 280 - 29F
+ 0x41e00000, 0x41f00000, 0x41f00073, 0x42200000,
+ 0x42300000, 0x42300165, 0x42900000, 0x42900063,
+ 0x42900070, 0x429000a5, 0x42900116, 0x43100000,
+ 0x43100027, 0x431000c3, 0x4310014e, 0x43200000,
+ 0x43220000, 0x43220033, 0x432200be, 0x43220106,
+ 0x4322014e, 0x4325b000, 0x4325b033, 0x4325b0be,
+ 0x4325b106, 0x4325b14e, 0x43700000, 0x43a00000,
+ 0x43b00000, 0x44400000, 0x44400031, 0x44400073,
+ // Entry 2A0 - 2BF
+ 0x4440010d, 0x44500000, 0x4450004b, 0x445000a5,
+ 0x44500130, 0x44500132, 0x44e00000, 0x45000000,
+ 0x4500009a, 0x450000b4, 0x450000d1, 0x4500010e,
+ 0x46100000, 0x4610009a, 0x46400000, 0x464000a5,
+ 0x46400132, 0x46700000, 0x46700125, 0x46b00000,
+ 0x46b00124, 0x46f00000, 0x46f0006e, 0x46f00070,
+ 0x47100000, 0x47600000, 0x47600128, 0x47a00000,
+ 0x48000000, 0x48200000, 0x4820012a, 0x48a00000,
+ // Entry 2C0 - 2DF
+ 0x48a0005e, 0x48a0012c, 0x48e00000, 0x49400000,
+ 0x49400107, 0x4a400000, 0x4a4000d5, 0x4a900000,
+ 0x4a9000bb, 0x4ac00000, 0x4ac00053, 0x4ae00000,
+ 0x4ae00131, 0x4b400000, 0x4b40009a, 0x4b4000e9,
+ 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000,
+ 0x4bc20138, 0x4bc5b000, 0x4bc5b138, 0x4be00000,
+ 0x4be5b000, 0x4be5b0b5, 0x4bef4000, 0x4bef40b5,
+ 0x4c000000, 0x4c300000, 0x4c30013f, 0x4c900000,
+ // Entry 2E0 - 2FF
+ 0x4c900001, 0x4cc00000, 0x4cc00130, 0x4ce00000,
+ 0x4cf00000, 0x4cf0004e, 0x4e500000, 0x4e500115,
+ 0x4f200000, 0x4fb00000, 0x4fb00132, 0x50900000,
+ 0x50900052, 0x51200000, 0x51200001, 0x51800000,
+ 0x5180003b, 0x518000d7, 0x51f00000, 0x51f3b000,
+ 0x51f3b053, 0x51f3c000, 0x51f3c08e, 0x52800000,
+ 0x528000bb, 0x52900000, 0x5293b000, 0x5293b053,
+ 0x5293b08e, 0x5293b0c7, 0x5293b10e, 0x5293c000,
+ // Entry 300 - 31F
+ 0x5293c08e, 0x5293c0c7, 0x5293c12f, 0x52f00000,
+ 0x52f00162,
+} // Size: 3116 bytes
+
+const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix"
+
+// Total table size 3147 bytes (3KiB); checksum: 5A8FFFA5
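The two entries in `specialTagsStr` are the only tags in the compact set that carry a variant or extension, and per the index constants above they occupy the final two compact IDs (`caESvalenciaIndex` = 773, `enUSuvaposixIndex` = 774). A minimal sketch of how a lookup over that space-separated string maps back to those IDs; `specialTagID` is a hypothetical helper, not part of the vendored package:

```go
package main

import (
	"fmt"
	"strings"
)

const numCompactTags = 775 // mirrors NumCompactTags above
const specialTagsStr = "ca-ES-valencia en-US-u-va-posix"

// specialTagID returns the compact ID of a special tag, exploiting the
// layout above: the special tags occupy the last entries of the index.
func specialTagID(tag string) (int, bool) {
	special := strings.Split(specialTagsStr, " ")
	for i, s := range special {
		if s == tag {
			return numCompactTags - len(special) + i, true
		}
	}
	return 0, false
}

func main() {
	fmt.Println(specialTagID("ca-ES-valencia")) // 773 true
}
```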
diff --git a/vendor/golang.org/x/text/internal/language/compact/tags.go b/vendor/golang.org/x/text/internal/language/compact/tags.go
new file mode 100644
index 000000000..ca135d295
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compact/tags.go
@@ -0,0 +1,91 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package compact
+
+var (
+ und = Tag{}
+
+ Und Tag = Tag{}
+
+ Afrikaans Tag = Tag{language: afIndex, locale: afIndex}
+ Amharic Tag = Tag{language: amIndex, locale: amIndex}
+ Arabic Tag = Tag{language: arIndex, locale: arIndex}
+ ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index}
+ Azerbaijani Tag = Tag{language: azIndex, locale: azIndex}
+ Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex}
+ Bengali Tag = Tag{language: bnIndex, locale: bnIndex}
+ Catalan Tag = Tag{language: caIndex, locale: caIndex}
+ Czech Tag = Tag{language: csIndex, locale: csIndex}
+ Danish Tag = Tag{language: daIndex, locale: daIndex}
+ German Tag = Tag{language: deIndex, locale: deIndex}
+ Greek Tag = Tag{language: elIndex, locale: elIndex}
+ English Tag = Tag{language: enIndex, locale: enIndex}
+ AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex}
+ BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex}
+ Spanish Tag = Tag{language: esIndex, locale: esIndex}
+ EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex}
+ LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index}
+ Estonian Tag = Tag{language: etIndex, locale: etIndex}
+ Persian Tag = Tag{language: faIndex, locale: faIndex}
+ Finnish Tag = Tag{language: fiIndex, locale: fiIndex}
+ Filipino Tag = Tag{language: filIndex, locale: filIndex}
+ French Tag = Tag{language: frIndex, locale: frIndex}
+ CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex}
+ Gujarati Tag = Tag{language: guIndex, locale: guIndex}
+ Hebrew Tag = Tag{language: heIndex, locale: heIndex}
+ Hindi Tag = Tag{language: hiIndex, locale: hiIndex}
+ Croatian Tag = Tag{language: hrIndex, locale: hrIndex}
+ Hungarian Tag = Tag{language: huIndex, locale: huIndex}
+ Armenian Tag = Tag{language: hyIndex, locale: hyIndex}
+ Indonesian Tag = Tag{language: idIndex, locale: idIndex}
+ Icelandic Tag = Tag{language: isIndex, locale: isIndex}
+ Italian Tag = Tag{language: itIndex, locale: itIndex}
+ Japanese Tag = Tag{language: jaIndex, locale: jaIndex}
+ Georgian Tag = Tag{language: kaIndex, locale: kaIndex}
+ Kazakh Tag = Tag{language: kkIndex, locale: kkIndex}
+ Khmer Tag = Tag{language: kmIndex, locale: kmIndex}
+ Kannada Tag = Tag{language: knIndex, locale: knIndex}
+ Korean Tag = Tag{language: koIndex, locale: koIndex}
+ Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex}
+ Lao Tag = Tag{language: loIndex, locale: loIndex}
+ Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex}
+ Latvian Tag = Tag{language: lvIndex, locale: lvIndex}
+ Macedonian Tag = Tag{language: mkIndex, locale: mkIndex}
+ Malayalam Tag = Tag{language: mlIndex, locale: mlIndex}
+ Mongolian Tag = Tag{language: mnIndex, locale: mnIndex}
+ Marathi Tag = Tag{language: mrIndex, locale: mrIndex}
+ Malay Tag = Tag{language: msIndex, locale: msIndex}
+ Burmese Tag = Tag{language: myIndex, locale: myIndex}
+ Nepali Tag = Tag{language: neIndex, locale: neIndex}
+ Dutch Tag = Tag{language: nlIndex, locale: nlIndex}
+ Norwegian Tag = Tag{language: noIndex, locale: noIndex}
+ Punjabi Tag = Tag{language: paIndex, locale: paIndex}
+ Polish Tag = Tag{language: plIndex, locale: plIndex}
+ Portuguese Tag = Tag{language: ptIndex, locale: ptIndex}
+ BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex}
+ EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex}
+ Romanian Tag = Tag{language: roIndex, locale: roIndex}
+ Russian Tag = Tag{language: ruIndex, locale: ruIndex}
+ Sinhala Tag = Tag{language: siIndex, locale: siIndex}
+ Slovak Tag = Tag{language: skIndex, locale: skIndex}
+ Slovenian Tag = Tag{language: slIndex, locale: slIndex}
+ Albanian Tag = Tag{language: sqIndex, locale: sqIndex}
+ Serbian Tag = Tag{language: srIndex, locale: srIndex}
+ SerbianLatin Tag = Tag{language: srLatnIndex, locale: srLatnIndex}
+ Swedish Tag = Tag{language: svIndex, locale: svIndex}
+ Swahili Tag = Tag{language: swIndex, locale: swIndex}
+ Tamil Tag = Tag{language: taIndex, locale: taIndex}
+ Telugu Tag = Tag{language: teIndex, locale: teIndex}
+ Thai Tag = Tag{language: thIndex, locale: thIndex}
+ Turkish Tag = Tag{language: trIndex, locale: trIndex}
+ Ukrainian Tag = Tag{language: ukIndex, locale: ukIndex}
+ Urdu Tag = Tag{language: urIndex, locale: urIndex}
+ Uzbek Tag = Tag{language: uzIndex, locale: uzIndex}
+ Vietnamese Tag = Tag{language: viIndex, locale: viIndex}
+ Chinese Tag = Tag{language: zhIndex, locale: zhIndex}
+ SimplifiedChinese Tag = Tag{language: zhHansIndex, locale: zhHansIndex}
+ TraditionalChinese Tag = Tag{language: zhHantIndex, locale: zhHantIndex}
+ Zulu Tag = Tag{language: zuIndex, locale: zuIndex}
+)
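These predefined Tag values back the matching exported constants in the public golang.org/x/text/language package, which is how they are normally reached. A minimal usage sketch against that public API:

```go
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Make is the error-omitting wrapper around Parse; on malformed
	// input it falls back to a sensible default instead of an error.
	t := language.Make("en-US")
	fmt.Println(t) // en-US

	// The public constants correspond to the compact tags defined above.
	fmt.Println(language.SimplifiedChinese) // zh-Hans
}
```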
diff --git a/vendor/golang.org/x/text/internal/language/compose.go b/vendor/golang.org/x/text/internal/language/compose.go
new file mode 100644
index 000000000..4ae78e0fa
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compose.go
@@ -0,0 +1,167 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "sort"
+ "strings"
+)
+
+// A Builder allows constructing a Tag from individual components.
+// Its main user is Compose in the top-level language package.
+type Builder struct {
+ Tag Tag
+
+ private string // the x extension
+ variants []string
+ extensions []string
+}
+
+// Make returns a new Tag from the current settings.
+func (b *Builder) Make() Tag {
+ t := b.Tag
+
+ if len(b.extensions) > 0 || len(b.variants) > 0 {
+ sort.Sort(sortVariants(b.variants))
+ sort.Strings(b.extensions)
+
+ if b.private != "" {
+ b.extensions = append(b.extensions, b.private)
+ }
+ n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...)
+ buf := make([]byte, n)
+ p := t.genCoreBytes(buf)
+ t.pVariant = byte(p)
+ p += appendTokens(buf[p:], b.variants...)
+ t.pExt = uint16(p)
+ p += appendTokens(buf[p:], b.extensions...)
+ t.str = string(buf[:p])
+ // We may not always need to remake the string, but when or when not
+ // to do so is rather tricky.
+ scan := makeScanner(buf[:p])
+ t, _ = parse(&scan, "")
+ return t
+
+ } else if b.private != "" {
+ t.str = b.private
+ t.RemakeString()
+ }
+ return t
+}
+
+// SetTag copies all the settings from a given Tag. Any previously set values
+// are discarded.
+func (b *Builder) SetTag(t Tag) {
+ b.Tag.LangID = t.LangID
+ b.Tag.RegionID = t.RegionID
+ b.Tag.ScriptID = t.ScriptID
+ // TODO: optimize
+ b.variants = b.variants[:0]
+ if variants := t.Variants(); variants != "" {
+ for _, vr := range strings.Split(variants[1:], "-") {
+ b.variants = append(b.variants, vr)
+ }
+ }
+ b.extensions, b.private = b.extensions[:0], ""
+ for _, e := range t.Extensions() {
+ b.AddExt(e)
+ }
+}
+
+// AddExt adds extension e to the tag. e must be a valid extension as returned
+// by Tag.Extension. If the extension already exists, it will be discarded,
+// except for a -u extension, where non-existing key-type pairs will be added.
+func (b *Builder) AddExt(e string) {
+ if e[0] == 'x' {
+ if b.private == "" {
+ b.private = e
+ }
+ return
+ }
+ for i, s := range b.extensions {
+ if s[0] == e[0] {
+ if e[0] == 'u' {
+ b.extensions[i] += e[1:]
+ }
+ return
+ }
+ }
+ b.extensions = append(b.extensions, e)
+}
+
+// SetExt sets the extension e to the tag. e must be a valid extension as
+// returned by Tag.Extension. If the extension already exists, it will be
+// overwritten, except for a -u extension, where the individual key-type pairs
+// will be set.
+func (b *Builder) SetExt(e string) {
+ if e[0] == 'x' {
+ b.private = e
+ return
+ }
+ for i, s := range b.extensions {
+ if s[0] == e[0] {
+ if e[0] == 'u' {
+ b.extensions[i] = e + s[1:]
+ } else {
+ b.extensions[i] = e
+ }
+ return
+ }
+ }
+ b.extensions = append(b.extensions, e)
+}
+
+// AddVariant adds any number of variants.
+func (b *Builder) AddVariant(v ...string) {
+ for _, v := range v {
+ if v != "" {
+ b.variants = append(b.variants, v)
+ }
+ }
+}
+
+// ClearVariants removes any variants previously added, including those
+// copied from a Tag in SetTag.
+func (b *Builder) ClearVariants() {
+ b.variants = b.variants[:0]
+}
+
+// ClearExtensions removes any extensions previously added, including those
+// copied from a Tag in SetTag.
+func (b *Builder) ClearExtensions() {
+ b.private = ""
+ b.extensions = b.extensions[:0]
+}
+
+func tokenLen(token ...string) (n int) {
+ for _, t := range token {
+ n += len(t) + 1
+ }
+ return
+}
+
+func appendTokens(b []byte, token ...string) int {
+ p := 0
+ for _, t := range token {
+ b[p] = '-'
+ copy(b[p+1:], t)
+ p += 1 + len(t)
+ }
+ return p
+}
+
+type sortVariants []string
+
+func (s sortVariants) Len() int {
+ return len(s)
+}
+
+func (s sortVariants) Swap(i, j int) {
+ s[j], s[i] = s[i], s[j]
+}
+
+func (s sortVariants) Less(i, j int) bool {
+ return variantIndex[s[i]] < variantIndex[s[j]]
+}
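Builder is the internal engine behind Compose in the public language package, assembling a tag from typed parts with the normalization shown above (variants sorted via sortVariants, extensions sorted, a private-use -x extension appended last). A minimal sketch using only the public API:

```go
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Compose builds a tag from parts; internally this runs through the
	// Builder's SetTag/AddExt/Make path defined above.
	t, err := language.Compose(
		language.MustParseBase("sr"),
		language.MustParseScript("Latn"),
		language.MustParseRegion("RS"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // sr-Latn-RS
}
```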
diff --git a/vendor/golang.org/x/text/internal/language/coverage.go b/vendor/golang.org/x/text/internal/language/coverage.go
new file mode 100644
index 000000000..9b20b88fe
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/coverage.go
@@ -0,0 +1,28 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+// BaseLanguages returns the list of all supported base languages. It generates
+// the list by traversing the internal structures.
+func BaseLanguages() []Language {
+ base := make([]Language, 0, NumLanguages)
+ for i := 0; i < langNoIndexOffset; i++ {
+ // We included "und" already for the value 0.
+ if i != nonCanonicalUnd {
+ base = append(base, Language(i))
+ }
+ }
+ i := langNoIndexOffset
+ for _, v := range langNoIndex {
+ for k := 0; k < 8; k++ {
+ if v&1 == 1 {
+ base = append(base, Language(i))
+ }
+ v >>= 1
+ i++
+ }
+ }
+ return base
+}
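The second loop in BaseLanguages scans the langNoIndex bitmap one byte at a time, shifting the low bit out eight times to recover the index of every set bit. The same pattern in isolation, as a self-contained sketch (the bitmap and offset here are invented for illustration):

```go
package main

import "fmt"

// setBitIndices mirrors the bit-scan loop in BaseLanguages: each byte of
// the bitmap is tested and shifted eight times, and the running index i
// (starting at offset) names the bit currently in the low position.
func setBitIndices(bitmap []uint8, offset int) []int {
	var out []int
	i := offset
	for _, v := range bitmap {
		for k := 0; k < 8; k++ {
			if v&1 == 1 {
				out = append(out, i)
			}
			v >>= 1
			i++
		}
	}
	return out
}

func main() {
	// 0x05 = 0b00000101: bits 0 and 2 are set.
	fmt.Println(setBitIndices([]uint8{0x05}, 100)) // [100 102]
}
```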
diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go
new file mode 100644
index 000000000..09d41c736
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/language.go
@@ -0,0 +1,627 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go -output tables.go
+
+package language // import "golang.org/x/text/internal/language"
+
+// TODO: Remove above NOTE after:
+// - verifying that tables are dropped correctly (most notably matcher tables).
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+const (
+ // maxCoreSize is the maximum size of a BCP 47 tag without variants and
+ // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
+ maxCoreSize = 12
+
+ // max99thPercentileSize is a somewhat arbitrary buffer size that presumably
+ // is large enough to hold at least 99% of the BCP 47 tags.
+ max99thPercentileSize = 32
+
+ // maxSimpleUExtensionSize is the maximum size of a -u extension with one
+ // key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
+ maxSimpleUExtensionSize = 14
+)
+
+// Tag represents a BCP 47 language tag. It is used to specify an instance of a
+// specific language or locale. All language tag values are guaranteed to be
+// well-formed. The zero value of Tag is Und.
+type Tag struct {
+ // TODO: the following fields have the form TagTypeID. This name is chosen
+ // to allow refactoring the public package without conflicting with its
+ // Base, Script, and Region methods. Once the transition is fully completed
+ // the ID can be stripped from the name.
+
+ LangID Language
+ RegionID Region
+ // TODO: we will soon run out of positions for ScriptID. Idea: instead of
+ // storing lang, region, and ScriptID codes, store only the compact index and
+ // have a lookup table from this code to its expansion. This greatly speeds
+	// up table lookup and the handling of common variant cases.
+ // This will also immediately free up 3 extra bytes. Also, the pVariant
+ // field can now be moved to the lookup table, as the compact index uniquely
+ // determines the offset of a possible variant.
+ ScriptID Script
+ pVariant byte // offset in str, includes preceding '-'
+ pExt uint16 // offset of first extension, includes preceding '-'
+
+ // str is the string representation of the Tag. It will only be used if the
+ // tag has variants or extensions.
+ str string
+}
+
+// Make is a convenience wrapper for Parse that omits the error.
+// In case of an error, a sensible default is returned.
+func Make(s string) Tag {
+ t, _ := Parse(s)
+ return t
+}
+
+// Raw returns the raw base language, script and region, without making an
+// attempt to infer their values.
+// TODO: consider removing
+func (t Tag) Raw() (b Language, s Script, r Region) {
+ return t.LangID, t.ScriptID, t.RegionID
+}
+
+// equalTags compares language, script and region subtags only.
+func (t Tag) equalTags(a Tag) bool {
+ return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID
+}
+
+// IsRoot returns true if t is equal to language "und".
+func (t Tag) IsRoot() bool {
+ if int(t.pVariant) < len(t.str) {
+ return false
+ }
+ return t.equalTags(Und)
+}
+
+// IsPrivateUse reports whether the Tag consists solely of a private use
+// tag.
+func (t Tag) IsPrivateUse() bool {
+ return t.str != "" && t.pVariant == 0
+}
+
+// RemakeString is used to update t.str in case lang, script or region changed.
+// It is assumed that pExt and pVariant still point to the start of the
+// respective parts.
+func (t *Tag) RemakeString() {
+ if t.str == "" {
+ return
+ }
+ extra := t.str[t.pVariant:]
+ if t.pVariant > 0 {
+ extra = extra[1:]
+ }
+ if t.equalTags(Und) && strings.HasPrefix(extra, "x-") {
+ t.str = extra
+ t.pVariant = 0
+ t.pExt = 0
+ return
+ }
+ var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
+ b := buf[:t.genCoreBytes(buf[:])]
+ if extra != "" {
+ diff := len(b) - int(t.pVariant)
+ b = append(b, '-')
+ b = append(b, extra...)
+ t.pVariant = uint8(int(t.pVariant) + diff)
+ t.pExt = uint16(int(t.pExt) + diff)
+ } else {
+ t.pVariant = uint8(len(b))
+ t.pExt = uint16(len(b))
+ }
+ t.str = string(b)
+}
+
+// genCoreBytes writes a string for the base language, script and region tags
+// to the given buffer and returns the number of bytes written. It will never
+// write more than maxCoreSize bytes.
+func (t *Tag) genCoreBytes(buf []byte) int {
+ n := t.LangID.StringToBuf(buf[:])
+ if t.ScriptID != 0 {
+ n += copy(buf[n:], "-")
+ n += copy(buf[n:], t.ScriptID.String())
+ }
+ if t.RegionID != 0 {
+ n += copy(buf[n:], "-")
+ n += copy(buf[n:], t.RegionID.String())
+ }
+ return n
+}
+
+// String returns the canonical string representation of the language tag.
+func (t Tag) String() string {
+ if t.str != "" {
+ return t.str
+ }
+ if t.ScriptID == 0 && t.RegionID == 0 {
+ return t.LangID.String()
+ }
+ buf := [maxCoreSize]byte{}
+ return string(buf[:t.genCoreBytes(buf[:])])
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (t Tag) MarshalText() (text []byte, err error) {
+ if t.str != "" {
+ text = append(text, t.str...)
+ } else if t.ScriptID == 0 && t.RegionID == 0 {
+ text = append(text, t.LangID.String()...)
+ } else {
+ buf := [maxCoreSize]byte{}
+ text = buf[:t.genCoreBytes(buf[:])]
+ }
+ return text, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (t *Tag) UnmarshalText(text []byte) error {
+ tag, err := Parse(string(text))
+ *t = tag
+ return err
+}
+
+// Variants returns the part of the tag holding all variants or the empty string
+// if there are no variants defined.
+func (t Tag) Variants() string {
+ if t.pVariant == 0 {
+ return ""
+ }
+ return t.str[t.pVariant:t.pExt]
+}
+
+// VariantOrPrivateUseTags returns variants or private use tags.
+func (t Tag) VariantOrPrivateUseTags() string {
+ if t.pExt > 0 {
+ return t.str[t.pVariant:t.pExt]
+ }
+ return t.str[t.pVariant:]
+}
+
+// HasString reports whether this tag defines more than just the raw
+// components.
+func (t Tag) HasString() bool {
+ return t.str != ""
+}
+
+// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
+// specific language are substituted with fields from the parent language.
+// The parent for a language may change for newer versions of CLDR.
+func (t Tag) Parent() Tag {
+ if t.str != "" {
+ // Strip the variants and extensions.
+ b, s, r := t.Raw()
+ t = Tag{LangID: b, ScriptID: s, RegionID: r}
+ if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 {
+ base, _ := addTags(Tag{LangID: t.LangID})
+ if base.ScriptID == t.ScriptID {
+ return Tag{LangID: t.LangID}
+ }
+ }
+ return t
+ }
+ if t.LangID != 0 {
+ if t.RegionID != 0 {
+ maxScript := t.ScriptID
+ if maxScript == 0 {
+ max, _ := addTags(t)
+ maxScript = max.ScriptID
+ }
+
+ for i := range parents {
+ if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript {
+ for _, r := range parents[i].fromRegion {
+ if Region(r) == t.RegionID {
+ return Tag{
+ LangID: t.LangID,
+ ScriptID: Script(parents[i].script),
+ RegionID: Region(parents[i].toRegion),
+ }
+ }
+ }
+ }
+ }
+
+ // Strip the script if it is the default one.
+ base, _ := addTags(Tag{LangID: t.LangID})
+ if base.ScriptID != maxScript {
+ return Tag{LangID: t.LangID, ScriptID: maxScript}
+ }
+ return Tag{LangID: t.LangID}
+ } else if t.ScriptID != 0 {
+		// The parent for a base-script pair with a non-default script is
+ // "und" instead of the base language.
+ base, _ := addTags(Tag{LangID: t.LangID})
+ if base.ScriptID != t.ScriptID {
+ return Und
+ }
+ return Tag{LangID: t.LangID}
+ }
+ }
+ return Und
+}
+
+// ParseExtension parses s as an extension and returns it on success.
+func ParseExtension(s string) (ext string, err error) {
+ defer func() {
+ if recover() != nil {
+ ext = ""
+ err = ErrSyntax
+ }
+ }()
+
+ scan := makeScannerString(s)
+ var end int
+ if n := len(scan.token); n != 1 {
+ return "", ErrSyntax
+ }
+ scan.toLower(0, len(scan.b))
+ end = parseExtension(&scan)
+ if end != len(s) {
+ return "", ErrSyntax
+ }
+ return string(scan.b), nil
+}
+
+// HasVariants reports whether t has variants.
+func (t Tag) HasVariants() bool {
+ return uint16(t.pVariant) < t.pExt
+}
+
+// HasExtensions reports whether t has extensions.
+func (t Tag) HasExtensions() bool {
+ return int(t.pExt) < len(t.str)
+}
+
+// Extension returns the extension of type x for tag t. It will return
+// false for ok if t does not have the requested extension. The returned
+// extension will be invalid in this case.
+func (t Tag) Extension(x byte) (ext string, ok bool) {
+ for i := int(t.pExt); i < len(t.str)-1; {
+ var ext string
+ i, ext = getExtension(t.str, i)
+ if ext[0] == x {
+ return ext, true
+ }
+ }
+ return "", false
+}
+
+// Extensions returns all extensions of t.
+func (t Tag) Extensions() []string {
+ e := []string{}
+ for i := int(t.pExt); i < len(t.str)-1; {
+ var ext string
+ i, ext = getExtension(t.str, i)
+ e = append(e, ext)
+ }
+ return e
+}
+
+// TypeForKey returns the type associated with the given key, where key and type
+// are of the allowed values defined for the Unicode locale extension ('u') in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// TypeForKey will traverse the inheritance chain to get the correct value.
+//
+// If there are multiple types associated with a key, only the first will be
+// returned. If there is no type associated with a key, it returns the empty
+// string.
+func (t Tag) TypeForKey(key string) string {
+ if _, start, end, _ := t.findTypeForKey(key); end != start {
+ s := t.str[start:end]
+ if p := strings.IndexByte(s, '-'); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+ return ""
+}
+
+var (
+ errPrivateUse = errors.New("cannot set a key on a private use tag")
+ errInvalidArguments = errors.New("invalid key or type")
+)
+
+// SetTypeForKey returns a new Tag with the key set to type, where key and type
+// are of the allowed values defined for the Unicode locale extension ('u') in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// An empty value removes an existing pair with the same key.
+func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
+ if t.IsPrivateUse() {
+ return t, errPrivateUse
+ }
+ if len(key) != 2 {
+ return t, errInvalidArguments
+ }
+
+ // Remove the setting if value is "".
+ if value == "" {
+ start, sep, end, _ := t.findTypeForKey(key)
+ if start != sep {
+ // Remove a possible empty extension.
+ switch {
+ case t.str[start-2] != '-': // has previous elements.
+ case end == len(t.str), // end of string
+ end+2 < len(t.str) && t.str[end+2] == '-': // end of extension
+ start -= 2
+ }
+ if start == int(t.pVariant) && end == len(t.str) {
+ t.str = ""
+ t.pVariant, t.pExt = 0, 0
+ } else {
+ t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
+ }
+ }
+ return t, nil
+ }
+
+ if len(value) < 3 || len(value) > 8 {
+ return t, errInvalidArguments
+ }
+
+ var (
+ buf [maxCoreSize + maxSimpleUExtensionSize]byte
+ uStart int // start of the -u extension.
+ )
+
+ // Generate the tag string if needed.
+ if t.str == "" {
+ uStart = t.genCoreBytes(buf[:])
+ buf[uStart] = '-'
+ uStart++
+ }
+
+ // Create new key-type pair and parse it to verify.
+ b := buf[uStart:]
+ copy(b, "u-")
+ copy(b[2:], key)
+ b[4] = '-'
+ b = b[:5+copy(b[5:], value)]
+ scan := makeScanner(b)
+ if parseExtensions(&scan); scan.err != nil {
+ return t, scan.err
+ }
+
+ // Assemble the replacement string.
+ if t.str == "" {
+ t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
+ t.str = string(buf[:uStart+len(b)])
+ } else {
+ s := t.str
+ start, sep, end, hasExt := t.findTypeForKey(key)
+ if start == sep {
+ if hasExt {
+ b = b[2:]
+ }
+ t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:])
+ } else {
+ t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:])
+ }
+ }
+ return t, nil
+}
+
+// findTypeForKey returns the start and end position for the type corresponding
+// to key or the point at which to insert the key-value pair if the type
+// wasn't found. The hasExt return value reports whether an -u extension was present.
+// Note: the extensions are typically very small and are likely to contain
+// only one key-type pair.
+func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) {
+ p := int(t.pExt)
+ if len(key) != 2 || p == len(t.str) || p == 0 {
+ return p, p, p, false
+ }
+ s := t.str
+
+ // Find the correct extension.
+ for p++; s[p] != 'u'; p++ {
+ if s[p] > 'u' {
+ p--
+ return p, p, p, false
+ }
+ if p = nextExtension(s, p); p == len(s) {
+ return len(s), len(s), len(s), false
+ }
+ }
+ // Proceed to the hyphen following the extension name.
+ p++
+
+ // curKey is the key currently being processed.
+ curKey := ""
+
+ // Iterate over keys until we get the end of a section.
+ for {
+ end = p
+ for p++; p < len(s) && s[p] != '-'; p++ {
+ }
+ n := p - end - 1
+ if n <= 2 && curKey == key {
+ if sep < end {
+ sep++
+ }
+ return start, sep, end, true
+ }
+ switch n {
+ case 0, // invalid string
+ 1: // next extension
+ return end, end, end, true
+ case 2:
+ // next key
+ curKey = s[end+1 : p]
+ if curKey > key {
+ return end, end, end, true
+ }
+ start = end
+ sep = p
+ }
+ }
+}
+
+// ParseBase parses a 2- or 3-letter ISO 639 code.
+// It returns a ValueError if s is a well-formed but unknown language identifier,
+// or another error if parsing failed for a different reason.
+func ParseBase(s string) (l Language, err error) {
+ defer func() {
+ if recover() != nil {
+ l = 0
+ err = ErrSyntax
+ }
+ }()
+
+ if n := len(s); n < 2 || 3 < n {
+ return 0, ErrSyntax
+ }
+ var buf [3]byte
+ return getLangID(buf[:copy(buf[:], s)])
+}
+
+// ParseScript parses a 4-letter ISO 15924 code.
+// It returns a ValueError if s is a well-formed but unknown script identifier,
+// or another error if parsing failed for a different reason.
+func ParseScript(s string) (scr Script, err error) {
+ defer func() {
+ if recover() != nil {
+ scr = 0
+ err = ErrSyntax
+ }
+ }()
+
+ if len(s) != 4 {
+ return 0, ErrSyntax
+ }
+ var buf [4]byte
+ return getScriptID(script, buf[:copy(buf[:], s)])
+}
+
+// EncodeM49 returns the Region for the given UN M.49 code.
+// It returns an error if r is not a valid code.
+func EncodeM49(r int) (Region, error) {
+ return getRegionM49(r)
+}
+
+// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
+// It returns a ValueError if s is a well-formed but unknown region identifier,
+// or another error if parsing failed for a different reason.
+func ParseRegion(s string) (r Region, err error) {
+ defer func() {
+ if recover() != nil {
+ r = 0
+ err = ErrSyntax
+ }
+ }()
+
+ if n := len(s); n < 2 || 3 < n {
+ return 0, ErrSyntax
+ }
+ var buf [3]byte
+ return getRegionID(buf[:copy(buf[:], s)])
+}
+
+// IsCountry returns whether this region is a country or autonomous area. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsCountry() bool {
+ if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK {
+ return false
+ }
+ return true
+}
+
+// IsGroup returns whether this region defines a collection of regions. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsGroup() bool {
+ if r == 0 {
+ return false
+ }
+ return int(regionInclusion[r]) < len(regionContainment)
+}
+
+// Contains returns whether Region c is contained by Region r. It returns true
+// if c == r.
+func (r Region) Contains(c Region) bool {
+ if r == c {
+ return true
+ }
+ g := regionInclusion[r]
+ if g >= nRegionGroups {
+ return false
+ }
+ m := regionContainment[g]
+
+ d := regionInclusion[c]
+ b := regionInclusionBits[d]
+
+ // A contained country may belong to multiple disjoint groups. Matching any
+ // of these indicates containment. If the contained region is a group, it
+ // must strictly be a subset.
+ if d >= nRegionGroups {
+ return b&m != 0
+ }
+ return b&^m == 0
+}
+
+var errNoTLD = errors.New("language: region is not a valid ccTLD")
+
+// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
+// In all other cases it returns either the region itself or an error.
+//
+// This method may return an error for a region for which there exists a
+// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
+// region will already be canonicalized it was obtained from a Tag that was
+// obtained using any of the default methods.
+func (r Region) TLD() (Region, error) {
+ // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
+ // difference between ISO 3166-1 and IANA ccTLD.
+ if r == _GB {
+ r = _UK
+ }
+ if (r.typ() & ccTLD) == 0 {
+ return 0, errNoTLD
+ }
+ return r, nil
+}
+
+// Canonicalize returns the region or a possible replacement if the region is
+// deprecated. It will not return a replacement for deprecated regions that
+// are split into multiple regions.
+func (r Region) Canonicalize() Region {
+ if cr := normRegion(r); cr != 0 {
+ return cr
+ }
+ return r
+}
+
+// Variant represents a registered variant of a language as defined by BCP 47.
+type Variant struct {
+ ID uint8
+ str string
+}
+
+// ParseVariant parses and returns a Variant. An error is returned if s is not
+// a valid variant.
+func ParseVariant(s string) (v Variant, err error) {
+ defer func() {
+ if recover() != nil {
+ v = Variant{}
+ err = ErrSyntax
+ }
+ }()
+
+ s = strings.ToLower(s)
+ if id, ok := variantIndex[s]; ok {
+ return Variant{id, s}, nil
+ }
+ return Variant{}, NewValueError([]byte(s))
+}
+
+// String returns the string representation of the variant.
+func (v Variant) String() string {
+ return v.str
+}
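The -u extension machinery above (TypeForKey, SetTypeForKey, findTypeForKey) is exposed on the public language.Tag with the same semantics: only the first type for a key is returned, and setting an empty value removes the pair. A minimal sketch against that public API:

```go
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Read the type for the collation key "co" from the -u extension.
	t := language.MustParse("de-DE-u-co-phonebk")
	fmt.Println(t.TypeForKey("co")) // phonebk

	// Setting a key returns a new tag; an empty value would remove it.
	t2, err := t.SetTypeForKey("co", "standard")
	if err != nil {
		panic(err)
	}
	fmt.Println(t2) // de-DE-u-co-standard
}
```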
diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go
new file mode 100644
index 000000000..231b4fbde
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/lookup.go
@@ -0,0 +1,412 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+
+ "golang.org/x/text/internal/tag"
+)
+
+// findIndex tries to find the given tag in idx and returns a standardized error
+// if it could not be found.
+func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
+ if !tag.FixCase(form, key) {
+ return 0, ErrSyntax
+ }
+ i := idx.Index(key)
+ if i == -1 {
+ return 0, NewValueError(key)
+ }
+ return i, nil
+}
+
+func searchUint(imap []uint16, key uint16) int {
+ return sort.Search(len(imap), func(i int) bool {
+ return imap[i] >= key
+ })
+}
+
+type Language uint16
+
+// getLangID returns the langID of s if s is a canonical subtag,
+// or langUnknown if it is not.
+func getLangID(s []byte) (Language, error) {
+ if len(s) == 2 {
+ return getLangISO2(s)
+ }
+ return getLangISO3(s)
+}
+
+// TODO language normalization as well as the AliasMaps could be moved to the
+// higher level package, but it is a bit tricky to separate the generation.
+
+func (id Language) Canonicalize() (Language, AliasType) {
+ return normLang(id)
+}
+
+// normLang returns the mapped langID of id according to mapping m.
+func normLang(id Language) (Language, AliasType) {
+ k := sort.Search(len(AliasMap), func(i int) bool {
+ return AliasMap[i].From >= uint16(id)
+ })
+ if k < len(AliasMap) && AliasMap[k].From == uint16(id) {
+ return Language(AliasMap[k].To), AliasTypes[k]
+ }
+ return id, AliasTypeUnknown
+}
+
+// getLangISO2 returns the langID for the given 2-letter ISO language code
+// or unknownLang if this does not exist.
+func getLangISO2(s []byte) (Language, error) {
+ if !tag.FixCase("zz", s) {
+ return 0, ErrSyntax
+ }
+ if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
+ return Language(i), nil
+ }
+ return 0, NewValueError(s)
+}
+
+const base = 'z' - 'a' + 1
+
+func strToInt(s []byte) uint {
+ v := uint(0)
+ for i := 0; i < len(s); i++ {
+ v *= base
+ v += uint(s[i] - 'a')
+ }
+ return v
+}
+
+// intToStr converts the given integer to the original ASCII string passed to strToInt.
+// len(s) must match the number of characters obtained.
+func intToStr(v uint, s []byte) {
+ for i := len(s) - 1; i >= 0; i-- {
+ s[i] = byte(v%base) + 'a'
+ v /= base
+ }
+}
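+
+// A worked example of the base-26 round trip above, for s == "und":
+//
+//	strToInt([]byte("und")) == 20*26*26 + 13*26 + 3 == 13861
+//
+//	var buf [3]byte
+//	intToStr(13861, buf[:]) // buf now holds "und"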
+
+// getLangISO3 returns the langID for the given 3-letter ISO language code
+// or unknownLang if this does not exist.
+func getLangISO3(s []byte) (Language, error) {
+ if tag.FixCase("und", s) {
+ // first try to match canonical 3-letter entries
+ for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
+ if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
+ // We treat "und" as special and always translate it to "unspecified".
+ // Note that ZZ and Zzzz are private use and are not treated as
+ // unspecified by default.
+ id := Language(i)
+ if id == nonCanonicalUnd {
+ return 0, nil
+ }
+ return id, nil
+ }
+ }
+ if i := altLangISO3.Index(s); i != -1 {
+ return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil
+ }
+ n := strToInt(s)
+ if langNoIndex[n/8]&(1<<(n%8)) != 0 {
+ return Language(n) + langNoIndexOffset, nil
+ }
+ // Check for non-canonical uses of ISO3.
+ for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
+ if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
+ return Language(i), nil
+ }
+ }
+ return 0, NewValueError(s)
+ }
+ return 0, ErrSyntax
+}
+
+// StringToBuf writes the string to b and returns the number of bytes
+// written. cap(b) must be >= 3.
+func (id Language) StringToBuf(b []byte) int {
+ if id >= langNoIndexOffset {
+ intToStr(uint(id)-langNoIndexOffset, b[:3])
+ return 3
+ } else if id == 0 {
+ return copy(b, "und")
+ }
+ l := lang[id<<2:]
+ if l[3] == 0 {
+ return copy(b, l[:3])
+ }
+ return copy(b, l[:2])
+}
+
+// String returns the BCP 47 representation of the langID.
+// Use b as variable name, instead of id, to ensure the variable
+// used is consistent with that of Base in which this type is embedded.
+func (b Language) String() string {
+ if b == 0 {
+ return "und"
+ } else if b >= langNoIndexOffset {
+ b -= langNoIndexOffset
+ buf := [3]byte{}
+ intToStr(uint(b), buf[:])
+ return string(buf[:])
+ }
+ l := lang.Elem(int(b))
+ if l[3] == 0 {
+ return l[:3]
+ }
+ return l[:2]
+}
+
+// ISO3 returns the ISO 639-3 language code.
+func (b Language) ISO3() string {
+ if b == 0 || b >= langNoIndexOffset {
+ return b.String()
+ }
+ l := lang.Elem(int(b))
+ if l[3] == 0 {
+ return l[:3]
+ } else if l[2] == 0 {
+ return altLangISO3.Elem(int(l[3]))[:3]
+ }
+ // This allocation will only happen for 3-letter ISO codes
+ // that are non-canonical BCP 47 language identifiers.
+ return l[0:1] + l[2:4]
+}
+
+// IsPrivateUse reports whether this language code is reserved for private use.
+func (b Language) IsPrivateUse() bool {
+ return langPrivateStart <= b && b <= langPrivateEnd
+}
+
+// SuppressScript returns the script marked as SuppressScript in the IANA
+// language tag repository, or 0 if there is no such script.
+func (b Language) SuppressScript() Script {
+ if b < langNoIndexOffset {
+ return Script(suppressScript[b])
+ }
+ return 0
+}
+
+type Region uint16
+
+// getRegionID returns the region id for s, which may be a 2- or 3-letter ISO
+// country code or a 3-digit UN M.49 code.
+func getRegionID(s []byte) (Region, error) {
+ if len(s) == 3 {
+ if isAlpha(s[0]) {
+ return getRegionISO3(s)
+ }
+ if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
+ return getRegionM49(int(i))
+ }
+ }
+ return getRegionISO2(s)
+}
+
+// getRegionISO2 returns the regionID for the given 2-letter ISO country code
+// or unknownRegion if this does not exist.
+func getRegionISO2(s []byte) (Region, error) {
+ i, err := findIndex(regionISO, s, "ZZ")
+ if err != nil {
+ return 0, err
+ }
+ return Region(i) + isoRegionOffset, nil
+}
+
+// getRegionISO3 returns the regionID for the given 3-letter ISO country code
+// or unknownRegion if this does not exist.
+func getRegionISO3(s []byte) (Region, error) {
+ if tag.FixCase("ZZZ", s) {
+ for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
+ if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
+ return Region(i) + isoRegionOffset, nil
+ }
+ }
+ for i := 0; i < len(altRegionISO3); i += 3 {
+ if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
+ return Region(altRegionIDs[i/3]), nil
+ }
+ }
+ return 0, NewValueError(s)
+ }
+ return 0, ErrSyntax
+}
+
+func getRegionM49(n int) (Region, error) {
+ if 0 < n && n <= 999 {
+ const (
+ searchBits = 7
+ regionBits = 9
+ regionMask = 1<<regionBits - 1
+ )
+ idx := n >> searchBits
+ buf := fromM49[m49Index[idx]:m49Index[idx+1]]
+ val := uint16(n) << regionBits // we rely on bits shifting out
+ i := sort.Search(len(buf), func(i int) bool {
+ return buf[i] >= val
+ })
+ if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
+ return Region(r & regionMask), nil
+ }
+ }
+ var e ValueError
+ fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
+ return 0, e
+}
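+
+// Reading of the packed encoding used above (inferred from the code, not
+// from upstream documentation): each uint16 in fromM49 carries the low
+// searchBits bits of the M.49 code in its top bits and the region ID in its
+// low regionBits bits, with n>>searchBits selecting the bucket. Decoding one
+// packed entry e looks like:
+//
+//	region := Region(e & regionMask) // low 9 bits: the region ID
+//	m49low := e >> regionBits        // low 7 bits of the M.49 code
+//	_, _ = region, m49low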
+
+// normRegion returns a region if r is deprecated or 0 otherwise.
+// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
+// TODO: consider mapping split up regions to new most populous one (like CLDR).
+func normRegion(r Region) Region {
+ m := regionOldMap
+ k := sort.Search(len(m), func(i int) bool {
+ return m[i].From >= uint16(r)
+ })
+ if k < len(m) && m[k].From == uint16(r) {
+ return Region(m[k].To)
+ }
+ return 0
+}
+
+const (
+ iso3166UserAssigned = 1 << iota
+ ccTLD
+ bcp47Region
+)
+
+func (r Region) typ() byte {
+ return regionTypes[r]
+}
+
+// String returns the BCP 47 representation for the region.
+// It returns "ZZ" for an unspecified region.
+func (r Region) String() string {
+ if r < isoRegionOffset {
+ if r == 0 {
+ return "ZZ"
+ }
+ return fmt.Sprintf("%03d", r.M49())
+ }
+ r -= isoRegionOffset
+ return regionISO.Elem(int(r))[:2]
+}
+
+// ISO3 returns the 3-letter ISO code of r.
+// Note that not all regions have a 3-letter ISO code.
+// In such cases this method returns "ZZZ".
+func (r Region) ISO3() string {
+ if r < isoRegionOffset {
+ return "ZZZ"
+ }
+ r -= isoRegionOffset
+ reg := regionISO.Elem(int(r))
+ switch reg[2] {
+ case 0:
+ return altRegionISO3[reg[3]:][:3]
+ case ' ':
+ return "ZZZ"
+ }
+ return reg[0:1] + reg[2:4]
+}
+
+// M49 returns the UN M.49 encoding of r, or 0 if this encoding
+// is not defined for r.
+func (r Region) M49() int {
+ return int(m49[r])
+}
+
+// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
+// may include private-use tags that are assigned by CLDR and used in this
+// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
+func (r Region) IsPrivateUse() bool {
+ return r.typ()&iso3166UserAssigned != 0
+}
+
+type Script uint16
+
+// getScriptID returns the script id for string s. It assumes that s
+// is of the format [A-Z][a-z]{3}.
+func getScriptID(idx tag.Index, s []byte) (Script, error) {
+ i, err := findIndex(idx, s, "Zzzz")
+ return Script(i), err
+}
+
+// String returns the script code in title case.
+// It returns "Zzzz" for an unspecified script.
+func (s Script) String() string {
+ if s == 0 {
+ return "Zzzz"
+ }
+ return script.Elem(int(s))
+}
+
+// IsPrivateUse reports whether this script code is reserved for private use.
+func (s Script) IsPrivateUse() bool {
+ return _Qaaa <= s && s <= _Qabx
+}
+
+const (
+ maxAltTaglen = len("en-US-POSIX")
+ maxLen = maxAltTaglen
+)
+
+var (
+ // grandfatheredMap holds a mapping from legacy and grandfathered tags to
+ // their base language or an index to a more elaborate tag.
+ grandfatheredMap = map[[maxLen]byte]int16{
+ [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
+ [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
+ [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
+ [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
+ [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
+ [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
+ [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
+ [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
+ [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao
+ [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay
+ [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
+ [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
+ [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
+ [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
+ [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
+ [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
+ [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
+ [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
+ [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
+ [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
+
+ // Grandfathered tags with no modern replacement will be converted as
+ // follows:
+ [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
+ [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
+ [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
+ [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
+ [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
+ [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
+
+ // CLDR-specific tag.
+ [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
+ [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX
+ }
+
+ altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}
+
+ altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
+)
+
+func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
+ if v, ok := grandfatheredMap[s]; ok {
+ if v < 0 {
+ return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
+ }
+ t.LangID = Language(v)
+ return t, true
+ }
+ return t, false
+}
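+
+// A sketch of the grandfathered lookup; the input tag is an assumption:
+//
+//	var key [maxAltTaglen]byte
+//	copy(key[:], "i-klingon")
+//	t, ok := grandfathered(key) // ok == true; t.LangID == _tlh per the map above
+//	_, _ = t, ok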
diff --git a/vendor/golang.org/x/text/internal/language/match.go b/vendor/golang.org/x/text/internal/language/match.go
new file mode 100644
index 000000000..75a2dbca7
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/match.go
@@ -0,0 +1,226 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import "errors"
+
+type scriptRegionFlags uint8
+
+const (
+ isList = 1 << iota
+ scriptInFrom
+ regionInFrom
+)
+
+func (t *Tag) setUndefinedLang(id Language) {
+ if t.LangID == 0 {
+ t.LangID = id
+ }
+}
+
+func (t *Tag) setUndefinedScript(id Script) {
+ if t.ScriptID == 0 {
+ t.ScriptID = id
+ }
+}
+
+func (t *Tag) setUndefinedRegion(id Region) {
+ if t.RegionID == 0 || t.RegionID.Contains(id) {
+ t.RegionID = id
+ }
+}
+
+// ErrMissingLikelyTagsData indicates no information was available
+// to compute likely values of missing tags.
+var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
+
+// addLikelySubtags sets subtags to their most likely value, given the locale.
+// In most cases this means setting fields for unknown values, but in some
+// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
+// if the given locale cannot be expanded.
+func (t Tag) addLikelySubtags() (Tag, error) {
+ id, err := addTags(t)
+ if err != nil {
+ return t, err
+ } else if id.equalTags(t) {
+ return t, nil
+ }
+ id.RemakeString()
+ return id, nil
+}
+
+// specializeRegion attempts to specialize a group region.
+func specializeRegion(t *Tag) bool {
+ if i := regionInclusion[t.RegionID]; i < nRegionGroups {
+ x := likelyRegionGroup[i]
+ if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID {
+ t.RegionID = Region(x.region)
+ }
+ return true
+ }
+ return false
+}
+
+// Maximize returns a new tag with missing tags filled in.
+func (t Tag) Maximize() (Tag, error) {
+ return addTags(t)
+}
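+
+// A sketch of Maximize; the expanded form is an assumption about the
+// CLDR-derived likely-subtags tables:
+//
+//	t, _ := Parse("ja")
+//	max, _ := t.Maximize() // plausibly ja-Jpan-JP
+//	_ = max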
+
+func addTags(t Tag) (Tag, error) {
+ // We leave private use identifiers alone.
+ if t.IsPrivateUse() {
+ return t, nil
+ }
+ if t.ScriptID != 0 && t.RegionID != 0 {
+ if t.LangID != 0 {
+ // already fully specified
+ specializeRegion(&t)
+ return t, nil
+ }
+ // Search matches for und-script-region. Note that for these cases
+ // region will never be a group so there is no need to check for this.
+ list := likelyRegion[t.RegionID : t.RegionID+1]
+ if x := list[0]; x.flags&isList != 0 {
+ list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
+ }
+ for _, x := range list {
+ // Deviating from the spec. See match_test.go for details.
+ if Script(x.script) == t.ScriptID {
+ t.setUndefinedLang(Language(x.lang))
+ return t, nil
+ }
+ }
+ }
+ if t.LangID != 0 {
+ // Search matches for lang-script and lang-region, where lang != und.
+ if t.LangID < langNoIndexOffset {
+ x := likelyLang[t.LangID]
+ if x.flags&isList != 0 {
+ list := likelyLangList[x.region : x.region+uint16(x.script)]
+ if t.ScriptID != 0 {
+ for _, x := range list {
+ if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 {
+ t.setUndefinedRegion(Region(x.region))
+ return t, nil
+ }
+ }
+ } else if t.RegionID != 0 {
+ count := 0
+ goodScript := true
+ tt := t
+ for _, x := range list {
+ // We visit all entries for which the script was not
+ // defined, including the ones where the region was not
+ // defined. This allows for proper disambiguation within
+ // regions.
+ if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) {
+ tt.RegionID = Region(x.region)
+ tt.setUndefinedScript(Script(x.script))
+ goodScript = goodScript && tt.ScriptID == Script(x.script)
+ count++
+ }
+ }
+ if count == 1 {
+ return tt, nil
+ }
+ // Even if we fail to find a unique Region, we might have
+ // an unambiguous script.
+ if goodScript {
+ t.ScriptID = tt.ScriptID
+ }
+ }
+ }
+ }
+ } else {
+ // Search matches for und-script.
+ if t.ScriptID != 0 {
+ x := likelyScript[t.ScriptID]
+ if x.region != 0 {
+ t.setUndefinedRegion(Region(x.region))
+ t.setUndefinedLang(Language(x.lang))
+ return t, nil
+ }
+ }
+ // Search matches for und-region. If und-script-region exists, it would
+ // have been found earlier.
+ if t.RegionID != 0 {
+ if i := regionInclusion[t.RegionID]; i < nRegionGroups {
+ x := likelyRegionGroup[i]
+ if x.region != 0 {
+ t.setUndefinedLang(Language(x.lang))
+ t.setUndefinedScript(Script(x.script))
+ t.RegionID = Region(x.region)
+ }
+ } else {
+ x := likelyRegion[t.RegionID]
+ if x.flags&isList != 0 {
+ x = likelyRegionList[x.lang]
+ }
+ if x.script != 0 && x.flags != scriptInFrom {
+ t.setUndefinedLang(Language(x.lang))
+ t.setUndefinedScript(Script(x.script))
+ return t, nil
+ }
+ }
+ }
+ }
+
+ // Search matches for lang.
+ if t.LangID < langNoIndexOffset {
+ x := likelyLang[t.LangID]
+ if x.flags&isList != 0 {
+ x = likelyLangList[x.region]
+ }
+ if x.region != 0 {
+ t.setUndefinedScript(Script(x.script))
+ t.setUndefinedRegion(Region(x.region))
+ }
+ specializeRegion(&t)
+ if t.LangID == 0 {
+ t.LangID = _en // default language
+ }
+ return t, nil
+ }
+ return t, ErrMissingLikelyTagsData
+}
+
+func (t *Tag) setTagsFrom(id Tag) {
+ t.LangID = id.LangID
+ t.ScriptID = id.ScriptID
+ t.RegionID = id.RegionID
+}
+
+// minimize removes the region or script subtags from t such that
+// t.addLikelySubtags() == t.minimize().addLikelySubtags().
+func (t Tag) minimize() (Tag, error) {
+ t, err := minimizeTags(t)
+ if err != nil {
+ return t, err
+ }
+ t.RemakeString()
+ return t, nil
+}
+
+// minimizeTags mimics the behavior of the ICU 51 C implementation.
+func minimizeTags(t Tag) (Tag, error) {
+ if t.equalTags(Und) {
+ return t, nil
+ }
+ max, err := addTags(t)
+ if err != nil {
+ return t, err
+ }
+ for _, id := range [...]Tag{
+ {LangID: t.LangID},
+ {LangID: t.LangID, RegionID: t.RegionID},
+ {LangID: t.LangID, ScriptID: t.ScriptID},
+ } {
+ if x, err := addTags(id); err == nil && max.equalTags(x) {
+ t.setTagsFrom(id)
+ break
+ }
+ }
+ return t, nil
+}
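+
+// A sketch of the minimize/Maximize relationship stated above; the concrete
+// result is an assumption about the likely-subtags tables:
+//
+//	t, _ := Parse("ja-Jpan-JP")
+//	min, _ := t.minimize() // plausibly "ja": addTags of both sides agree
+//	_ = min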
diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go
new file mode 100644
index 000000000..aad1e0acf
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/parse.go
@@ -0,0 +1,608 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sort"
+
+ "golang.org/x/text/internal/tag"
+)
+
+// isAlpha returns true if the byte is not a digit.
+// b must be an ASCII letter or digit.
+func isAlpha(b byte) bool {
+ return b > '9'
+}
+
+// isAlphaNum returns true if the string contains only ASCII letters or digits.
+func isAlphaNum(s []byte) bool {
+ for _, c := range s {
+ if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
+ return false
+ }
+ }
+ return true
+}
+
+// ErrSyntax is returned by any of the parsing functions when the
+// input is not well-formed, according to BCP 47.
+// TODO: return the position at which the syntax error occurred?
+var ErrSyntax = errors.New("language: tag is not well-formed")
+
+// ErrDuplicateKey is returned when a tag contains the same key twice with
+// different values in the -u section.
+var ErrDuplicateKey = errors.New("language: different values for same key in -u extension")
+
+// ValueError is returned by any of the parsing functions when the
+// input is well-formed but the respective subtag is not recognized
+// as a valid value.
+type ValueError struct {
+ v [8]byte
+}
+
+// NewValueError creates a new ValueError.
+func NewValueError(tag []byte) ValueError {
+ var e ValueError
+ copy(e.v[:], tag)
+ return e
+}
+
+func (e ValueError) tag() []byte {
+ n := bytes.IndexByte(e.v[:], 0)
+ if n == -1 {
+ n = 8
+ }
+ return e.v[:n]
+}
+
+// Error implements the error interface.
+func (e ValueError) Error() string {
+ return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
+}
+
+// Subtag returns the subtag for which the error occurred.
+func (e ValueError) Subtag() string {
+ return string(e.tag())
+}
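+
+// A sketch of telling the two error classes apart; "AB" being an unassigned
+// region code is an assumption:
+//
+//	_, err := Parse("en-AB")
+//	if verr, ok := err.(ValueError); ok {
+//		_ = verr.Subtag() // "AB"
+//	}
+//	_, err = Parse("e") // too short: err == ErrSyntax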
+
+// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
+type scanner struct {
+ b []byte
+ bytes [max99thPercentileSize]byte
+ token []byte
+ start int // start position of the current token
+ end int // end position of the current token
+ next int // next point for scan
+ err error
+ done bool
+}
+
+func makeScannerString(s string) scanner {
+ scan := scanner{}
+ if len(s) <= len(scan.bytes) {
+ scan.b = scan.bytes[:copy(scan.bytes[:], s)]
+ } else {
+ scan.b = []byte(s)
+ }
+ scan.init()
+ return scan
+}
+
+// makeScanner returns a scanner using b as the input buffer.
+// b is not copied and may be modified by the scanner routines.
+func makeScanner(b []byte) scanner {
+ scan := scanner{b: b}
+ scan.init()
+ return scan
+}
+
+func (s *scanner) init() {
+ for i, c := range s.b {
+ if c == '_' {
+ s.b[i] = '-'
+ }
+ }
+ s.scan()
+}
+
+// toLower converts the string between start and end to lower case.
+func (s *scanner) toLower(start, end int) {
+ for i := start; i < end; i++ {
+ c := s.b[i]
+ if 'A' <= c && c <= 'Z' {
+ s.b[i] += 'a' - 'A'
+ }
+ }
+}
+
+func (s *scanner) setError(e error) {
+ if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) {
+ s.err = e
+ }
+}
+
+// resizeRange shrinks or grows the array at position oldStart such that
+// a new string of size newSize can fit between oldStart and oldEnd.
+// Sets the scan point to after the resized range.
+func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
+ s.start = oldStart
+ if end := oldStart + newSize; end != oldEnd {
+ diff := end - oldEnd
+ var b []byte
+ if n := len(s.b) + diff; n > cap(s.b) {
+ b = make([]byte, n)
+ copy(b, s.b[:oldStart])
+ } else {
+ b = s.b[:n]
+ }
+ copy(b[end:], s.b[oldEnd:])
+ s.b = b
+ s.next = end + (s.next - s.end)
+ s.end = end
+ }
+}
+
+// replace replaces the current token with repl.
+func (s *scanner) replace(repl string) {
+ s.resizeRange(s.start, s.end, len(repl))
+ copy(s.b[s.start:], repl)
+}
+
+// gobble removes the current token from the input.
+// Caller must call scan after calling gobble.
+func (s *scanner) gobble(e error) {
+ s.setError(e)
+ if s.start == 0 {
+ s.b = s.b[:+copy(s.b, s.b[s.next:])]
+ s.end = 0
+ } else {
+ s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
+ s.end = s.start - 1
+ }
+ s.next = s.start
+}
+
+// deleteRange removes the given range from s.b before the current token.
+func (s *scanner) deleteRange(start, end int) {
+ s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
+ diff := end - start
+ s.next -= diff
+ s.start -= diff
+ s.end -= diff
+}
+
+// scan parses the next token of a BCP 47 string. Tokens that are larger
+// than 8 characters or include non-alphanumeric characters result in an error
+// and are gobbled and removed from the output.
+// It returns the end position of the last token consumed.
+func (s *scanner) scan() (end int) {
+ end = s.end
+ s.token = nil
+ for s.start = s.next; s.next < len(s.b); {
+ i := bytes.IndexByte(s.b[s.next:], '-')
+ if i == -1 {
+ s.end = len(s.b)
+ s.next = len(s.b)
+ i = s.end - s.start
+ } else {
+ s.end = s.next + i
+ s.next = s.end + 1
+ }
+ token := s.b[s.start:s.end]
+ if i < 1 || i > 8 || !isAlphaNum(token) {
+ s.gobble(ErrSyntax)
+ continue
+ }
+ s.token = token
+ return end
+ }
+ if n := len(s.b); n > 0 && s.b[n-1] == '-' {
+ s.setError(ErrSyntax)
+ s.b = s.b[:len(s.b)-1]
+ }
+ s.done = true
+ return end
+}
+
+// acceptMinSize parses multiple tokens of the given size or greater.
+// It returns the end position of the last token consumed.
+func (s *scanner) acceptMinSize(min int) (end int) {
+ end = s.end
+ s.scan()
+ for ; len(s.token) >= min; s.scan() {
+ end = s.end
+ }
+ return end
+}
+
+// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
+// failed it returns an error and any part of the tag that could be parsed.
+// If parsing succeeded but an unknown value was found, it returns
+// ValueError. The Tag returned in this case is just stripped of the unknown
+// value. All other values are preserved. It accepts tags in the BCP 47 format
+// and extensions to this standard defined in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+func Parse(s string) (t Tag, err error) {
+ // TODO: consider supporting old-style locale key-value pairs.
+ if s == "" {
+ return Und, ErrSyntax
+ }
+ defer func() {
+ if recover() != nil {
+ t = Und
+ err = ErrSyntax
+ return
+ }
+ }()
+ if len(s) <= maxAltTaglen {
+ b := [maxAltTaglen]byte{}
+ for i, c := range s {
+ // Generating invalid UTF-8 is okay as it won't match.
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ } else if c == '_' {
+ c = '-'
+ }
+ b[i] = byte(c)
+ }
+ if t, ok := grandfathered(b); ok {
+ return t, nil
+ }
+ }
+ scan := makeScannerString(s)
+ return parse(&scan, s)
+}
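+
+// A sketch of Parse's normalization; the canonical output is an assumption
+// about the subtag tables:
+//
+//	t, _ := Parse("en_latn_us") // '_' is accepted as a separator
+//	_ = t // expected canonical form: "en-Latn-US"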
+
+func parse(scan *scanner, s string) (t Tag, err error) {
+ t = Und
+ var end int
+ if n := len(scan.token); n <= 1 {
+ scan.toLower(0, len(scan.b))
+ if n == 0 || scan.token[0] != 'x' {
+ return t, ErrSyntax
+ }
+ end = parseExtensions(scan)
+ } else if n >= 4 {
+ return Und, ErrSyntax
+ } else { // the usual case
+ t, end = parseTag(scan, true)
+ if n := len(scan.token); n == 1 {
+ t.pExt = uint16(end)
+ end = parseExtensions(scan)
+ } else if end < len(scan.b) {
+ scan.setError(ErrSyntax)
+ scan.b = scan.b[:end]
+ }
+ }
+ if int(t.pVariant) < len(scan.b) {
+ if end < len(s) {
+ s = s[:end]
+ }
+ if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
+ t.str = s
+ } else {
+ t.str = string(scan.b)
+ }
+ } else {
+ t.pVariant, t.pExt = 0, 0
+ }
+ return t, scan.err
+}
+
+// parseTag parses language, script, region and variants.
+// It returns a Tag and the end position in the input that was parsed.
+// If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>.
+func parseTag(scan *scanner, doNorm bool) (t Tag, end int) {
+ var e error
+ // TODO: set an error if an unknown lang, script or region is encountered.
+ t.LangID, e = getLangID(scan.token)
+ scan.setError(e)
+ scan.replace(t.LangID.String())
+ langStart := scan.start
+ end = scan.scan()
+ for len(scan.token) == 3 && isAlpha(scan.token[0]) {
+ // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
+ // to a tag of the form <extlang>.
+ if doNorm {
+ lang, e := getLangID(scan.token)
+ if lang != 0 {
+ t.LangID = lang
+ langStr := lang.String()
+ copy(scan.b[langStart:], langStr)
+ scan.b[langStart+len(langStr)] = '-'
+ scan.start = langStart + len(langStr) + 1
+ }
+ scan.gobble(e)
+ }
+ end = scan.scan()
+ }
+ if len(scan.token) == 4 && isAlpha(scan.token[0]) {
+ t.ScriptID, e = getScriptID(script, scan.token)
+ if t.ScriptID == 0 {
+ scan.gobble(e)
+ }
+ end = scan.scan()
+ }
+ if n := len(scan.token); n >= 2 && n <= 3 {
+ t.RegionID, e = getRegionID(scan.token)
+ if t.RegionID == 0 {
+ scan.gobble(e)
+ } else {
+ scan.replace(t.RegionID.String())
+ }
+ end = scan.scan()
+ }
+ scan.toLower(scan.start, len(scan.b))
+ t.pVariant = byte(end)
+ end = parseVariants(scan, end, t)
+ t.pExt = uint16(end)
+ return t, end
+}
+
+var separator = []byte{'-'}
+
+// parseVariants scans tokens as long as each token is a valid variant string.
+// Duplicate variants are removed.
+func parseVariants(scan *scanner, end int, t Tag) int {
+ start := scan.start
+ varIDBuf := [4]uint8{}
+ variantBuf := [4][]byte{}
+ varID := varIDBuf[:0]
+ variant := variantBuf[:0]
+ last := -1
+ needSort := false
+ for ; len(scan.token) >= 4; scan.scan() {
+ // TODO: measure the impact of needing this conversion and redesign
+ // the data structure if there is an issue.
+ v, ok := variantIndex[string(scan.token)]
+ if !ok {
+ // unknown variant
+ // TODO: allow user-defined variants?
+ scan.gobble(NewValueError(scan.token))
+ continue
+ }
+ varID = append(varID, v)
+ variant = append(variant, scan.token)
+ if !needSort {
+ if last < int(v) {
+ last = int(v)
+ } else {
+ needSort = true
+ // There are no legal combinations of more than 7 variants
+ // (and this is by no means a useful sequence).
+ const maxVariants = 8
+ if len(varID) > maxVariants {
+ break
+ }
+ }
+ }
+ end = scan.end
+ }
+ if needSort {
+ sort.Sort(variantsSort{varID, variant})
+ k, l := 0, -1
+ for i, v := range varID {
+ w := int(v)
+ if l == w {
+ // Remove duplicates.
+ continue
+ }
+ varID[k] = varID[i]
+ variant[k] = variant[i]
+ k++
+ l = w
+ }
+ if str := bytes.Join(variant[:k], separator); len(str) == 0 {
+ end = start - 1
+ } else {
+ scan.resizeRange(start, end, len(str))
+ copy(scan.b[scan.start:], str)
+ end = scan.end
+ }
+ }
+ return end
+}
+
+type variantsSort struct {
+ i []uint8
+ v [][]byte
+}
+
+func (s variantsSort) Len() int {
+ return len(s.i)
+}
+
+func (s variantsSort) Swap(i, j int) {
+ s.i[i], s.i[j] = s.i[j], s.i[i]
+ s.v[i], s.v[j] = s.v[j], s.v[i]
+}
+
+func (s variantsSort) Less(i, j int) bool {
+ return s.i[i] < s.i[j]
+}
+
+type bytesSort struct {
+ b [][]byte
+ n int // first n bytes to compare
+}
+
+func (b bytesSort) Len() int {
+ return len(b.b)
+}
+
+func (b bytesSort) Swap(i, j int) {
+ b.b[i], b.b[j] = b.b[j], b.b[i]
+}
+
+func (b bytesSort) Less(i, j int) bool {
+ for k := 0; k < b.n; k++ {
+ if b.b[i][k] == b.b[j][k] {
+ continue
+ }
+ return b.b[i][k] < b.b[j][k]
+ }
+ return false
+}
+
+// parseExtensions parses and normalizes the extensions in the buffer.
+// It returns the last position of scan.b that is part of any extension.
+// It also trims scan.b to remove excess parts accordingly.
+func parseExtensions(scan *scanner) int {
+ start := scan.start
+ exts := [][]byte{}
+ private := []byte{}
+ end := scan.end
+ for len(scan.token) == 1 {
+ extStart := scan.start
+ ext := scan.token[0]
+ end = parseExtension(scan)
+ extension := scan.b[extStart:end]
+ if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
+ scan.setError(ErrSyntax)
+ end = extStart
+ continue
+ } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
+ scan.b = scan.b[:end]
+ return end
+ } else if ext == 'x' {
+ private = extension
+ break
+ }
+ exts = append(exts, extension)
+ }
+ sort.Sort(bytesSort{exts, 1})
+ if len(private) > 0 {
+ exts = append(exts, private)
+ }
+ scan.b = scan.b[:start]
+ if len(exts) > 0 {
+ scan.b = append(scan.b, bytes.Join(exts, separator)...)
+ } else if start > 0 {
+ // Strip trailing '-'.
+ scan.b = scan.b[:start-1]
+ }
+ return end
+}
+
+// parseExtension parses a single extension and returns the position of
+// the extension end.
+func parseExtension(scan *scanner) int {
+ start, end := scan.start, scan.end
+ switch scan.token[0] {
+ case 'u': // https://www.ietf.org/rfc/rfc6067.txt
+ attrStart := end
+ scan.scan()
+ for last := []byte{}; len(scan.token) > 2; scan.scan() {
+ if bytes.Compare(scan.token, last) != -1 {
+ // Attributes are unsorted. Start over from scratch.
+ p := attrStart + 1
+ scan.next = p
+ attrs := [][]byte{}
+ for scan.scan(); len(scan.token) > 2; scan.scan() {
+ attrs = append(attrs, scan.token)
+ end = scan.end
+ }
+ sort.Sort(bytesSort{attrs, 3})
+ copy(scan.b[p:], bytes.Join(attrs, separator))
+ break
+ }
+ last = scan.token
+ end = scan.end
+ }
+ // Scan key-type sequences. A key is of length 2 and may be followed
+ // by 0 or more "type" subtags from 3 to the maximum of 8 letters.
+ var last, key []byte
+ for attrEnd := end; len(scan.token) == 2; last = key {
+ key = scan.token
+ end = scan.end
+ for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+ end = scan.end
+ }
+ // TODO: check key value validity
+ if bytes.Compare(key, last) != 1 || scan.err != nil {
+ // We have an invalid key or the keys are not sorted.
+ // Start scanning keys from scratch and reorder.
+ p := attrEnd + 1
+ scan.next = p
+ keys := [][]byte{}
+ for scan.scan(); len(scan.token) == 2; {
+ keyStart := scan.start
+ end = scan.end
+ for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+ end = scan.end
+ }
+ keys = append(keys, scan.b[keyStart:end])
+ }
+ sort.Stable(bytesSort{keys, 2})
+ if n := len(keys); n > 0 {
+ k := 0
+ for i := 1; i < n; i++ {
+ if !bytes.Equal(keys[k][:2], keys[i][:2]) {
+ k++
+ keys[k] = keys[i]
+ } else if !bytes.Equal(keys[k], keys[i]) {
+ scan.setError(ErrDuplicateKey)
+ }
+ }
+ keys = keys[:k+1]
+ }
+ reordered := bytes.Join(keys, separator)
+ if e := p + len(reordered); e < end {
+ scan.deleteRange(e, end)
+ end = e
+ }
+ copy(scan.b[p:], reordered)
+ break
+ }
+ }
+ case 't': // https://www.ietf.org/rfc/rfc6497.txt
+ scan.scan()
+ if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
+ _, end = parseTag(scan, false)
+ scan.toLower(start, end)
+ }
+ for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
+ end = scan.acceptMinSize(3)
+ }
+ case 'x':
+ end = scan.acceptMinSize(1)
+ default:
+ end = scan.acceptMinSize(2)
+ }
+ return end
+}
+
+// getExtension returns the name, body and end position of the extension.
+func getExtension(s string, p int) (end int, ext string) {
+ if s[p] == '-' {
+ p++
+ }
+ if s[p] == 'x' {
+ return len(s), s[p:]
+ }
+ end = nextExtension(s, p)
+ return end, s[p:end]
+}
+
+// nextExtension finds the next extension within the string, searching
+// for the -<char>- pattern from position p.
+// In the vast majority of cases, language tags will have at most
+// one extension and extensions tend to be small.
+func nextExtension(s string, p int) int {
+ for n := len(s) - 3; p < n; {
+ if s[p] == '-' {
+ if s[p+2] == '-' {
+ return p
+ }
+ p += 3
+ } else {
+ p++
+ }
+ }
+ return len(s)
+}
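+
+// A worked example of getExtension/nextExtension; the tag value is an
+// assumption:
+//
+//	end, ext := getExtension("en-u-co-phonebk-x-priv", 2)
+//	_, _ = end, ext // ext == "u-co-phonebk"; end points at the following "-x"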
diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go
new file mode 100644
index 000000000..14167e74e
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/tables.go
@@ -0,0 +1,3494 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package language
+
+import "golang.org/x/text/internal/tag"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+const NumLanguages = 8798
+
+const NumScripts = 261
+
+const NumRegions = 358
+
+type FromTo struct {
+ From uint16
+ To uint16
+}
+
+const nonCanonicalUnd = 1201
+const (
+ _af = 22
+ _am = 39
+ _ar = 58
+ _az = 88
+ _bg = 126
+ _bn = 165
+ _ca = 215
+ _cs = 250
+ _da = 257
+ _de = 269
+ _el = 310
+ _en = 313
+ _es = 318
+ _et = 320
+ _fa = 328
+ _fi = 337
+ _fil = 339
+ _fr = 350
+ _gu = 420
+ _he = 444
+ _hi = 446
+ _hr = 465
+ _hu = 469
+ _hy = 471
+ _id = 481
+ _is = 504
+ _it = 505
+ _ja = 512
+ _ka = 528
+ _kk = 578
+ _km = 586
+ _kn = 593
+ _ko = 596
+ _ky = 650
+ _lo = 696
+ _lt = 704
+ _lv = 711
+ _mk = 767
+ _ml = 772
+ _mn = 779
+ _mo = 784
+ _mr = 795
+ _ms = 799
+ _mul = 806
+ _my = 817
+ _nb = 839
+ _ne = 849
+ _nl = 871
+ _no = 879
+ _pa = 925
+ _pl = 947
+ _pt = 960
+ _ro = 988
+ _ru = 994
+ _sh = 1031
+ _si = 1036
+ _sk = 1042
+ _sl = 1046
+ _sq = 1073
+ _sr = 1074
+ _sv = 1092
+ _sw = 1093
+ _ta = 1104
+ _te = 1121
+ _th = 1131
+ _tl = 1146
+ _tn = 1152
+ _tr = 1162
+ _uk = 1198
+ _ur = 1204
+ _uz = 1212
+ _vi = 1219
+ _zh = 1321
+ _zu = 1327
+ _jbo = 515
+ _ami = 1650
+ _bnn = 2357
+ _hak = 438
+ _tlh = 14467
+ _lb = 661
+ _nv = 899
+ _pwn = 12055
+ _tao = 14188
+ _tay = 14198
+ _tsu = 14662
+ _nn = 874
+ _sfb = 13629
+ _vgt = 15701
+ _sgg = 13660
+ _cmn = 3007
+ _nan = 835
+ _hsn = 467
+)
+
+const langPrivateStart = 0x2f72
+
+const langPrivateEnd = 0x3179
+
+// lang holds an alphabetically sorted list of ISO-639 language identifiers.
+// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag.
+// For 2-byte language identifiers, the two successive bytes have the following meaning:
+// - if the first letter of the 2- and 3-letter ISO codes are the same:
+// the second and third letter of the 3-letter ISO code.
+// - otherwise: a 0 and an index into altLangISO3, right-shifted by 2 bits.
+//
+// For 3-byte language identifiers the 4th byte is 0.
+const lang tag.Index = "" + // Size: 5324 bytes
+ "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" +
+ "cd\x00ace\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey" +
+ "\x00affragc\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00a" +
+ "jg\x00akkaakk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00am" +
+ "p\x00anrganc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00" +
+ "ape\x00apr\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars" +
+ "\x00ary\x00arz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00a" +
+ "tj\x00auy\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx" +
+ "\x00ayymayb\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00" +
+ "bba\x00bbb\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bc" +
+ "m\x00bcn\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00" +
+ "bet\x00bew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn" +
+ "\x00bgx\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib" +
+ "\x00big\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn" +
+ "\x00bjo\x00bjr\x00bjt\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt" +
+ "\x00bmambmh\x00bmk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00" +
+ "bom\x00bon\x00bpy\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx" +
+ "\x00brz\x00bsosbsj\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00b" +
+ "uc\x00bud\x00bug\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr" +
+ "\x00bxh\x00bye\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf" +
+ "\x00bzh\x00bzw\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg" +
+ "\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00c" +
+ "kl\x00cko\x00cky\x00cla\x00cme\x00cmg\x00cooscop\x00cps\x00crrecrh\x00cr" +
+ "j\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymda" +
+ "andad\x00daf\x00dag\x00dah\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00" +
+ "ddn\x00deeuded\x00den\x00dga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia" +
+ "\x00dje\x00dnj\x00dob\x00doi\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm" +
+ "\x00dtp\x00dts\x00dty\x00dua\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00d" +
+ "yo\x00dyu\x00dzzodzg\x00ebu\x00eeweefi\x00egl\x00egy\x00eka\x00eky\x00el" +
+ "llema\x00emi\x00enngenn\x00enq\x00eopoeri\x00es\x00\x05esu\x00etstetr" +
+ "\x00ett\x00etu\x00etx\x00euusewo\x00ext\x00faasfaa\x00fab\x00fag\x00fai" +
+ "\x00fan\x00ffulffi\x00ffm\x00fiinfia\x00fil\x00fit\x00fjijflr\x00fmp\x00" +
+ "foaofod\x00fon\x00for\x00fpe\x00fqs\x00frrafrc\x00frp\x00frr\x00frs\x00f" +
+ "ub\x00fud\x00fue\x00fuf\x00fuh\x00fuq\x00fur\x00fuv\x00fuy\x00fvr\x00fyr" +
+ "ygalegaa\x00gaf\x00gag\x00gah\x00gaj\x00gam\x00gan\x00gaw\x00gay\x00gba" +
+ "\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdlagde\x00gdn\x00gdr\x00geb\x00g" +
+ "ej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gil\x00gim\x00gjk\x00gjn\x00gju" +
+ "\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00gnrngnd\x00gng\x00god\x00gof" +
+ "\x00goi\x00gom\x00gon\x00gor\x00gos\x00got\x00grb\x00grc\x00grt\x00grw" +
+ "\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00gux\x00guz\x00gvlvgvf" +
+ "\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauhag\x00hak\x00ham\x00h" +
+ "aw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif\x00hig\x00hih\x00hi" +
+ "l\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homo" +
+ "hoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui\x00hyyehzerianaian" +
+ "\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd\x00idi\x00idu\x00i" +
+ "eleife\x00igboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw\x00ikx\x00i" +
+ "lo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00\x03iwm\x00i" +
+ "ws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00jgk\x00jgo" +
+ "\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatkaa\x00kab" +
+ "\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp\x00kbq" +
+ "\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl\x00kdt" +
+ "\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00kgp\x00k" +
+ "ha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij\x00kiu" +
+ "\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klalkln\x00" +
+ "klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw\x00knank" +
+ "nf\x00knp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" +
+ "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" +
+ "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" +
+ "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" +
+ "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" +
+ "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" +
+ "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" +
+ "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" +
+ "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" +
+ "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" +
+ "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" +
+ "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" +
+ "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" +
+ "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" +
+ "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" +
+ "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" +
+ "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" +
+ "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" +
+ "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" +
+ "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" +
+ "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" +
+ "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" +
+ "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" +
+ "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" +
+ "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" +
+ "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" +
+ "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" +
+ "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" +
+ "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" +
+ "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" +
+ "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" +
+ "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" +
+ "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" +
+ "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" +
+ "ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" +
+ "f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" +
+ "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" +
+ "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" +
+ "\x00sah\x00saq\x00sas\x00sat\x00sav\x00saz\x00sba\x00sbe\x00sbp\x00scrds" +
+ "ck\x00scl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00se" +
+ "i\x00ses\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn" +
+ "\x00shu\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks" +
+ "\x00sllvsld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp" +
+ "\x00smq\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq" +
+ "\x00sou\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx" +
+ "\x00ssswssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00" +
+ "sur\x00sus\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00s" +
+ "yl\x00syr\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf" +
+ "\x00tbg\x00tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelt" +
+ "ed\x00tem\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00th" +
+ "q\x00thr\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr" +
+ "\x00tkt\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00" +
+ "tog\x00toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssot" +
+ "sd\x00tsf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts" +
+ "\x00ttt\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00t" +
+ "wq\x00txg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli" +
+ "\x00umb\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00u" +
+ "vh\x00uvl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv" +
+ "\x00vls\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj" +
+ "\x00wal\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg" +
+ "\x00wib\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu" +
+ "\x00woolwob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00x" +
+ "bi\x00xcr\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna" +
+ "\x00xnr\x00xog\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe" +
+ "\x00yam\x00yao\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb" +
+ "\x00yby\x00yer\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00y" +
+ "ooryon\x00yrb\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw" +
+ "\x00zahazag\x00zbl\x00zdj\x00zea\x00zgh\x00zhhozhx\x00zia\x00zlm\x00zmi" +
+ "\x00zne\x00zuulzxx\x00zza\x00\xff\xff\xff\xff"
+
+const langNoIndexOffset = 1330
+
+// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index
+// in lookup tables. The language ids for these language codes are derived directly
+// from the letters and are not consecutive.
+// Size: 2197 bytes, 2197 elements
+var langNoIndex = [2197]uint8{
+ // Entry 0 - 3F
+ 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd3, 0x3b, 0xd2,
+ 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57,
+ 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70,
+ 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x72,
+ 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77,
+ 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2,
+ 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a,
+ 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff,
+ // Entry 40 - 7F
+ 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0,
+ 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed,
+ 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35,
+ 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff,
+ 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5,
+ 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3,
+ 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce,
+ 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf,
+ // Entry 80 - BF
+ 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x7f, 0xff, 0xff,
+ 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7,
+ 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba,
+ 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff,
+ 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff,
+ 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5,
+ 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c,
+ 0x08, 0x21, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80,
+ // Entry C0 - FF
+ 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96,
+ 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56,
+ 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7f, 0xf3, 0xef,
+ 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10,
+ 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35,
+ 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00,
+ 0xb0, 0x05, 0x80, 0x00, 0x20, 0x00, 0x00, 0x03,
+ 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d,
+ // Entry 100 - 13F
+ 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64,
+ 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00,
+ 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3,
+ 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c,
+ 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f,
+ 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56,
+ 0x90, 0x6d, 0x01, 0x2e, 0x96, 0x69, 0x20, 0xfb,
+ // Entry 140 - 17F
+ 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x16,
+ 0x03, 0x00, 0x00, 0xb0, 0x14, 0x23, 0x50, 0x06,
+ 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09,
+ 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x05,
+ 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04,
+ 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35,
+ 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03,
+ // Entry 180 - 1BF
+ 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98,
+ 0x21, 0x18, 0x81, 0x08, 0x00, 0x01, 0x40, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea,
+ 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
+ // Entry 1C0 - 1FF
+ 0x00, 0x03, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00,
+ 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55,
+ 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40,
+ 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7a, 0xbf,
+ // Entry 200 - 23F
+ 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27,
+ 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5,
+ 0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf,
+ 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3,
+ 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d,
+ 0x79, 0xed, 0x1c, 0x7f, 0x04, 0x08, 0x00, 0x01,
+ 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f,
+ 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54,
+ // Entry 240 - 27F
+ 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00,
+ 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0,
+ 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00,
+ 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04,
+ 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00,
+ 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff,
+ 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66,
+ // Entry 280 - 2BF
+ 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05,
+ 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51,
+ 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05,
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x0c, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60,
+ 0xe7, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80,
+ 0x03, 0x00, 0x00, 0x00, 0x8c, 0x50, 0x40, 0x04,
+ 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20,
+ // Entry 2C0 - 2FF
+ 0x02, 0x50, 0x80, 0x11, 0x00, 0x99, 0x6c, 0xe2,
+ 0x50, 0x27, 0x1d, 0x11, 0x29, 0x0e, 0x59, 0xe9,
+ 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00,
+ 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d,
+ 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00,
+ 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01,
+ 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x40, 0x08,
+ 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00,
+ // Entry 300 - 33F
+ 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0,
+ 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+ 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80,
+ 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0,
+ 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00,
+ 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80,
+ 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00,
+ // Entry 340 - 37F
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3,
+ 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb,
+ 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6,
+ 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff,
+ 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff,
+ 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0x7f,
+ 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f,
+ // Entry 380 - 3BF
+ 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f,
+ 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d,
+ 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf,
+ 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff,
+ 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb,
+ 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe,
+ 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x7d, 0x1f,
+ 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44,
+ // Entry 3C0 - 3FF
+ 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57,
+ 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7,
+ 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x20,
+ 0x40, 0x54, 0x9f, 0x8a, 0xdf, 0xf9, 0x6e, 0x11,
+ 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x03,
+ 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10,
+ 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2,
+ 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe,
+ // Entry 400 - 43F
+ 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f,
+ 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7,
+ 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f,
+ 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b,
+ 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7,
+ 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 0xf3, 0xfe,
+ 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde,
+ 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf,
+ // Entry 440 - 47F
+ 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d,
+ 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd,
+ 0x7f, 0x4e, 0xbf, 0x8f, 0xae, 0xff, 0xee, 0xdf,
+ 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7,
+ 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce,
+ 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xfd,
+ 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff,
+ 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4,
+ // Entry 480 - 4BF
+ 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb,
+ 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20,
+ 0x14, 0x00, 0x55, 0x51, 0xc2, 0x65, 0xf5, 0x41,
+ 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x85, 0xc5, 0x05,
+ 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05,
+ 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00,
+ 0x06, 0x11, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1,
+ // Entry 4C0 - 4FF
+ 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed,
+ 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40,
+ 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83,
+ 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7,
+ 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00,
+ 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d,
+ 0xbe, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41,
+ // Entry 500 - 53F
+ 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49,
+ 0x2d, 0x14, 0x27, 0x5f, 0xed, 0xf1, 0x3f, 0xe7,
+ 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8,
+ 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe7, 0xf7,
+ 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10,
+ 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9,
+ 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c,
+ 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40,
+ // Entry 540 - 57F
+ 0x00, 0x00, 0x01, 0x43, 0x19, 0x24, 0x08, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ // Entry 580 - 5BF
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d,
+ 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf,
+ 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+ 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00,
+ 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x20, 0x81,
+ 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40,
+ // Entry 5C0 - 5FF
+ 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0xbe, 0x02,
+ 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02,
+ 0x3d, 0x40, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d,
+ 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20,
+ 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00,
+ 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f,
+ 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe,
+ // Entry 600 - 63F
+ 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9,
+ 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1,
+ 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7,
+ 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd,
+ 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f,
+ 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe,
+ 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18,
+ 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f,
+ // Entry 640 - 67F
+ 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c,
+ 0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde,
+ 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x3f, 0x00, 0x98,
+ 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff,
+ 0xb9, 0xda, 0x7d, 0xd0, 0x3e, 0x15, 0x7b, 0xb4,
+ 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7,
+ 0x5f, 0xff, 0xff, 0x9e, 0xdf, 0xf6, 0xd7, 0xb9,
+ 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3,
+ // Entry 680 - 6BF
+ 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37,
+ 0xce, 0x7f, 0x44, 0x1d, 0x73, 0x7f, 0xf8, 0xda,
+ 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0,
+ 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08,
+ 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00,
+ 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x09, 0x06,
+ 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00,
+ 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f,
+ // Entry 6C0 - 6FF
+ 0x54, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08,
+ 0x40, 0x02, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41,
+ 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab,
+ 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00,
+ // Entry 700 - 73F
+ 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00,
+ 0x80, 0x86, 0xc2, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79,
+ 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 740 - 77F
+ 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e,
+ 0xb0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x46,
+ 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04,
+ 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a,
+ 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75,
+ 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03,
+ 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60,
+ // Entry 780 - 7BF
+ 0x83, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01,
+ 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00,
+ 0x10, 0x03, 0x31, 0x02, 0x01, 0x00, 0x00, 0xf0,
+ 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78,
+ 0x78, 0x15, 0x50, 0x05, 0xa4, 0x84, 0xa9, 0x41,
+ 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x40,
+ 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02,
+ 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed,
+ // Entry 7C0 - 7FF
+ 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42,
+ 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56,
+ 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff,
+ 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d,
+ 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80,
+ 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60,
+ 0xfe, 0x01, 0x02, 0x88, 0x2a, 0x40, 0x16, 0x01,
+ 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10,
+ // Entry 800 - 83F
+ 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf,
+ 0xbf, 0x03, 0x00, 0x00, 0x10, 0xdc, 0xa3, 0xd1,
+ 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3,
+ 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80,
+ 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84,
+ 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93,
+ 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10,
+ 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00,
+ // Entry 840 - 87F
+ 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x89,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28,
+ 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00,
+ 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 0x54, 0xf1,
+ 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50,
+ 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40,
+ 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1,
+ // Entry 880 - 8BF
+ 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24,
+ 0x0a, 0x00, 0x80, 0x00, 0x00,
+}
+
+// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives
+// to 2-letter language codes that cannot be derived using the method described above.
+// Each 3-letter code is followed by its 1-byte langID.
+const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff"
+
+// altLangIndex is used to convert indexes in altLangISO3 to langIDs.
+// Size: 12 bytes, 6 elements
+var altLangIndex = [6]uint16{
+ 0x0281, 0x0407, 0x01fb, 0x03e5, 0x013e, 0x0208,
+}
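+
+// altLangISO3Entry is an illustrative sketch (not part of the generated
+// data): it decodes element i of altLangISO3 under the layout described
+// above, a 3-letter code followed by a 1-byte index into altLangIndex.
+func altLangISO3Entry(i int) (code string, langID uint16) {
+ code = string(altLangISO3[i*4 : i*4+3]) // e.g. "heb" for i == 3
+ langID = altLangIndex[altLangISO3[i*4+3]] // e.g. 0x01fb for i == 3
+ return code, langID
+}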
+
+// AliasMap maps langIDs to their suggested replacements.
+// Size: 772 bytes, 193 elements
+var AliasMap = [193]FromTo{
+ 0: {From: 0x82, To: 0x88},
+ 1: {From: 0x187, To: 0x1ae},
+ 2: {From: 0x1f3, To: 0x1e1},
+ 3: {From: 0x1fb, To: 0x1bc},
+ 4: {From: 0x208, To: 0x512},
+ 5: {From: 0x20f, To: 0x20e},
+ 6: {From: 0x310, To: 0x3dc},
+ 7: {From: 0x347, To: 0x36f},
+ 8: {From: 0x407, To: 0x432},
+ 9: {From: 0x47a, To: 0x153},
+ 10: {From: 0x490, To: 0x451},
+ 11: {From: 0x4a2, To: 0x21},
+ 12: {From: 0x53e, To: 0x544},
+ 13: {From: 0x58f, To: 0x12d},
+ 14: {From: 0x62b, To: 0x34},
+ 15: {From: 0x62f, To: 0x14},
+ 16: {From: 0x630, To: 0x1eb1},
+ 17: {From: 0x651, To: 0x431},
+ 18: {From: 0x662, To: 0x431},
+ 19: {From: 0x6ed, To: 0x3a},
+ 20: {From: 0x6f8, To: 0x1d7},
+ 21: {From: 0x709, To: 0x3625},
+ 22: {From: 0x73e, To: 0x21a1},
+ 23: {From: 0x7b3, To: 0x56},
+ 24: {From: 0x7b9, To: 0x299b},
+ 25: {From: 0x7c5, To: 0x58},
+ 26: {From: 0x7e6, To: 0x145},
+ 27: {From: 0x80c, To: 0x5a},
+ 28: {From: 0x815, To: 0x8d},
+ 29: {From: 0x87e, To: 0x810},
+ 30: {From: 0x8a8, To: 0x8b7},
+ 31: {From: 0x8c3, To: 0xee3},
+ 32: {From: 0x8fa, To: 0x1dc},
+ 33: {From: 0x9ef, To: 0x331},
+ 34: {From: 0xa36, To: 0x2c5},
+ 35: {From: 0xa3d, To: 0xbf},
+ 36: {From: 0xabe, To: 0x3322},
+ 37: {From: 0xb38, To: 0x529},
+ 38: {From: 0xb75, To: 0x265a},
+ 39: {From: 0xb7e, To: 0xbc3},
+ 40: {From: 0xb9b, To: 0x44e},
+ 41: {From: 0xbbc, To: 0x4229},
+ 42: {From: 0xbbf, To: 0x529},
+ 43: {From: 0xbfe, To: 0x2da7},
+ 44: {From: 0xc2e, To: 0x3181},
+ 45: {From: 0xcb9, To: 0xf3},
+ 46: {From: 0xd08, To: 0xfa},
+ 47: {From: 0xdc8, To: 0x11a},
+ 48: {From: 0xdd7, To: 0x32d},
+ 49: {From: 0xdf8, To: 0xdfb},
+ 50: {From: 0xdfe, To: 0x531},
+ 51: {From: 0xe01, To: 0xdf3},
+ 52: {From: 0xedf, To: 0x205a},
+ 53: {From: 0xee9, To: 0x222e},
+ 54: {From: 0xeee, To: 0x2e9a},
+ 55: {From: 0xf39, To: 0x367},
+ 56: {From: 0x10d0, To: 0x140},
+ 57: {From: 0x1104, To: 0x2d0},
+ 58: {From: 0x11a0, To: 0x1ec},
+ 59: {From: 0x1279, To: 0x21},
+ 60: {From: 0x1424, To: 0x15e},
+ 61: {From: 0x1470, To: 0x14e},
+ 62: {From: 0x151f, To: 0xd9b},
+ 63: {From: 0x1523, To: 0x390},
+ 64: {From: 0x1532, To: 0x19f},
+ 65: {From: 0x1580, To: 0x210},
+ 66: {From: 0x1583, To: 0x10d},
+ 67: {From: 0x15a3, To: 0x3caf},
+ 68: {From: 0x1630, To: 0x222e},
+ 69: {From: 0x166a, To: 0x19b},
+ 70: {From: 0x16c8, To: 0x136},
+ 71: {From: 0x1700, To: 0x29f8},
+ 72: {From: 0x1718, To: 0x194},
+ 73: {From: 0x1727, To: 0xf3f},
+ 74: {From: 0x177a, To: 0x178},
+ 75: {From: 0x1809, To: 0x17b6},
+ 76: {From: 0x1816, To: 0x18f3},
+ 77: {From: 0x188a, To: 0x436},
+ 78: {From: 0x1979, To: 0x1d01},
+ 79: {From: 0x1a74, To: 0x2bb0},
+ 80: {From: 0x1a8a, To: 0x1f8},
+ 81: {From: 0x1b5a, To: 0x1fa},
+ 82: {From: 0x1b86, To: 0x1515},
+ 83: {From: 0x1d64, To: 0x2c9b},
+ 84: {From: 0x2038, To: 0x37b1},
+ 85: {From: 0x203d, To: 0x20dd},
+ 86: {From: 0x2042, To: 0x2e00},
+ 87: {From: 0x205a, To: 0x30b},
+ 88: {From: 0x20e3, To: 0x274},
+ 89: {From: 0x20ee, To: 0x263},
+ 90: {From: 0x20f2, To: 0x22d},
+ 91: {From: 0x20f9, To: 0x256},
+ 92: {From: 0x210f, To: 0x21eb},
+ 93: {From: 0x2135, To: 0x27d},
+ 94: {From: 0x2160, To: 0x913},
+ 95: {From: 0x2199, To: 0x121},
+ 96: {From: 0x21ce, To: 0x1561},
+ 97: {From: 0x21e6, To: 0x504},
+ 98: {From: 0x21f4, To: 0x49f},
+ 99: {From: 0x21fb, To: 0x269},
+ 100: {From: 0x222d, To: 0x121},
+ 101: {From: 0x2237, To: 0x121},
+ 102: {From: 0x2248, To: 0x217d},
+ 103: {From: 0x2262, To: 0x92a},
+ 104: {From: 0x2316, To: 0x3226},
+ 105: {From: 0x236a, To: 0x2835},
+ 106: {From: 0x2382, To: 0x3365},
+ 107: {From: 0x2472, To: 0x2c7},
+ 108: {From: 0x24e4, To: 0x2ff},
+ 109: {From: 0x24f0, To: 0x2fa},
+ 110: {From: 0x24fa, To: 0x31f},
+ 111: {From: 0x2550, To: 0xb5b},
+ 112: {From: 0x25a9, To: 0xe2},
+ 113: {From: 0x263e, To: 0x2d0},
+ 114: {From: 0x26c9, To: 0x26b4},
+ 115: {From: 0x26f9, To: 0x3c8},
+ 116: {From: 0x2727, To: 0x3caf},
+ 117: {From: 0x2755, To: 0x6a4},
+ 118: {From: 0x2765, To: 0x26b4},
+ 119: {From: 0x2789, To: 0x4358},
+ 120: {From: 0x27c9, To: 0x2001},
+ 121: {From: 0x28ea, To: 0x27b1},
+ 122: {From: 0x28ef, To: 0x2837},
+ 123: {From: 0x28fe, To: 0xaa5},
+ 124: {From: 0x2914, To: 0x351},
+ 125: {From: 0x2986, To: 0x2da7},
+ 126: {From: 0x29f0, To: 0x96b},
+ 127: {From: 0x2b1a, To: 0x38d},
+ 128: {From: 0x2bfc, To: 0x395},
+ 129: {From: 0x2c3f, To: 0x3caf},
+ 130: {From: 0x2ce1, To: 0x2201},
+ 131: {From: 0x2cfc, To: 0x3be},
+ 132: {From: 0x2d13, To: 0x597},
+ 133: {From: 0x2d47, To: 0x148},
+ 134: {From: 0x2d48, To: 0x148},
+ 135: {From: 0x2dff, To: 0x2f1},
+ 136: {From: 0x2e08, To: 0x19cc},
+ 137: {From: 0x2e10, To: 0xc45},
+ 138: {From: 0x2e1a, To: 0x2d95},
+ 139: {From: 0x2e21, To: 0x292},
+ 140: {From: 0x2e54, To: 0x7d},
+ 141: {From: 0x2e65, To: 0x2282},
+ 142: {From: 0x2e97, To: 0x1a4},
+ 143: {From: 0x2ea0, To: 0x2e9b},
+ 144: {From: 0x2eef, To: 0x2ed7},
+ 145: {From: 0x3193, To: 0x3c4},
+ 146: {From: 0x3366, To: 0x338e},
+ 147: {From: 0x342a, To: 0x3dc},
+ 148: {From: 0x34ee, To: 0x18d0},
+ 149: {From: 0x35c8, To: 0x2c9b},
+ 150: {From: 0x35e6, To: 0x412},
+ 151: {From: 0x35f5, To: 0x24b},
+ 152: {From: 0x360d, To: 0x1dc},
+ 153: {From: 0x3658, To: 0x246},
+ 154: {From: 0x3676, To: 0x3f4},
+ 155: {From: 0x36fd, To: 0x445},
+ 156: {From: 0x3747, To: 0x3b42},
+ 157: {From: 0x37c0, To: 0x121},
+ 158: {From: 0x3816, To: 0x38f2},
+ 159: {From: 0x382a, To: 0x2b48},
+ 160: {From: 0x382b, To: 0x2c9b},
+ 161: {From: 0x382f, To: 0xa9},
+ 162: {From: 0x3832, To: 0x3228},
+ 163: {From: 0x386c, To: 0x39a6},
+ 164: {From: 0x3892, To: 0x3fc0},
+ 165: {From: 0x38a0, To: 0x45f},
+ 166: {From: 0x38a5, To: 0x39d7},
+ 167: {From: 0x38b4, To: 0x1fa4},
+ 168: {From: 0x38b5, To: 0x2e9a},
+ 169: {From: 0x38fa, To: 0x38f1},
+ 170: {From: 0x395c, To: 0x47e},
+ 171: {From: 0x3b4e, To: 0xd91},
+ 172: {From: 0x3b78, To: 0x137},
+ 173: {From: 0x3c99, To: 0x4bc},
+ 174: {From: 0x3fbd, To: 0x100},
+ 175: {From: 0x4208, To: 0xa91},
+ 176: {From: 0x42be, To: 0x573},
+ 177: {From: 0x42f9, To: 0x3f60},
+ 178: {From: 0x4378, To: 0x25a},
+ 179: {From: 0x43b8, To: 0xe6c},
+ 180: {From: 0x43cd, To: 0x10f},
+ 181: {From: 0x43d4, To: 0x4848},
+ 182: {From: 0x44af, To: 0x3322},
+ 183: {From: 0x44e3, To: 0x512},
+ 184: {From: 0x45ca, To: 0x2409},
+ 185: {From: 0x45dd, To: 0x26dc},
+ 186: {From: 0x4610, To: 0x48ae},
+ 187: {From: 0x46ae, To: 0x46a0},
+ 188: {From: 0x473e, To: 0x4745},
+ 189: {From: 0x4817, To: 0x3503},
+ 190: {From: 0x483b, To: 0x208b},
+ 191: {From: 0x4916, To: 0x31f},
+ 192: {From: 0x49a7, To: 0x523},
+}
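+
+// aliasReplacement is an illustrative sketch (not part of the generated
+// data): AliasMap is sorted by From, so real code can binary search; a
+// linear scan is enough to show the shape of the mapping.
+func aliasReplacement(lang uint16) (to uint16, ok bool) {
+ for _, ft := range AliasMap {
+  if ft.From == lang {
+   return ft.To, true
+  }
+ }
+ return 0, false
+}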
+
+// Size: 193 bytes, 193 elements
+var AliasTypes = [193]AliasType{
+ // Entry 0 - 3F
+ 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 0,
+ 1, 2, 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0,
+ 0, 2, 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1,
+ 1, 1, 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1,
+ // Entry 40 - 7F
+ 1, 2, 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0,
+ 2, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0,
+ 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1,
+ // Entry 80 - BF
+ 1, 0, 0, 1, 0, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 1, 1, 2, 0, 0, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0,
+ 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 0,
+ 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1,
+ // Entry C0 - FF
+ 1,
+}
+
+const (
+ _Latn = 91
+ _Hani = 57
+ _Hans = 59
+ _Hant = 60
+ _Qaaa = 149
+ _Qaai = 157
+ _Qabx = 198
+ _Zinh = 255
+ _Zyyy = 260
+ _Zzzz = 261
+)
+
+// script is an alphabetically sorted list of ISO 15924 codes. The index
+// of the script in the string, divided by 4, is the internal scriptID.
+const script tag.Index = "" + // Size: 1052 bytes
+ "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" +
+ "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" +
+ "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" +
+ "GuruHanbHangHaniHanoHansHantHatrHebrHiraHluwHmngHmnpHrktHungIndsItalJamo" +
+ "JavaJpanJurcKaliKanaKawiKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatf" +
+ "LatgLatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedf" +
+ "MendMercMeroMlymModiMongMoonMrooMteiMultMymrNagmNandNarbNbatNewaNkdbNkgb" +
+ "NkooNshuOgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlv" +
+ "PhnxPiqdPlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" +
+ "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" +
+ "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" +
+ "QabxRanjRjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogd" +
+ "SogoSoraSoyoSundSunuSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTelu" +
+ "TengTfngTglgThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsux" +
+ "YeziYiiiZanbZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff"
+
+// suppressScript is an index from langID to the dominant script for that language,
+// if it exists. If a script is given, it should be suppressed from the language tag.
+// Size: 1330 bytes, 1330 elements
+var suppressScript = [1330]uint8{
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 40 - 7F
+ 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+ // Entry 80 - BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry C0 - FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 100 - 13F
+ 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xed, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x5b, 0x00,
+ // Entry 140 - 17F
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 180 - 1BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x5b, 0x35, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x22, 0x00,
+ // Entry 1C0 - 1FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x5b, 0x00, 0x5b, 0x5b, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x5b, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00,
+ // Entry 200 - 23F
+ 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 240 - 27F
+ 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x00, 0x00, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x53, 0x00, 0x00, 0x54, 0x00, 0x22, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 280 - 2BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 2C0 - 2FF
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
+ // Entry 300 - 33F
+ 0x00, 0x00, 0x00, 0x00, 0x6f, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x5b,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ // Entry 340 - 37F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x5b, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5b, 0x00,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 380 - 3BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00,
+ // Entry 3C0 - 3FF
+ 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 400 - 43F
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xd6, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ // Entry 440 - 47F
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xe9, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xee, 0x00, 0x00, 0x00, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ // Entry 480 - 4BF
+ 0x5b, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 4C0 - 4FF
+ 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 500 - 53F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b,
+ 0x00, 0x00,
+}
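+
+// suppressedScript is an illustrative sketch (not part of the generated
+// data): a nonzero entry is the scriptID of the language's dominant script
+// (0x5b is _Latn), which canonical tags may then omit.
+func suppressedScript(lang uint16) (scriptID uint8, ok bool) {
+ s := suppressScript[lang]
+ return s, s != 0
+}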
+
+const (
+ _001 = 1
+ _419 = 31
+ _BR = 65
+ _CA = 73
+ _ES = 111
+ _GB = 124
+ _MD = 189
+ _PT = 239
+ _UK = 307
+ _US = 310
+ _ZZ = 358
+ _XA = 324
+ _XC = 326
+ _XK = 334
+)
+
+// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID
+// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for
+// the UN.M49 codes used for groups.)
+const isoRegionOffset = 32
+
+// regionTypes defines the status of a region for various standards.
+// Size: 359 bytes, 359 elements
+var regionTypes = [359]uint8{
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ // Entry 40 - 7F
+ 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x04, 0x06,
+ 0x04, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x04, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x00,
+ 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x00, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06,
+ // Entry 80 - BF
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x00, 0x04, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ // Entry C0 - FF
+ 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x00, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04,
+ 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x00, 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ // Entry 100 - 13F
+ 0x05, 0x05, 0x05, 0x06, 0x00, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x02, 0x06, 0x04, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06,
+ // Entry 140 - 17F
+ 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x06,
+ 0x06, 0x04, 0x06, 0x06, 0x04, 0x06, 0x05,
+}
+
+// regionISO holds a list of alphabetically sorted 2-letter ISO region codes.
+// Each 2-letter code is followed by two bytes with the following meaning:
+//   - [A-Z]{2}: the first letter of the 2-letter code plus these two
+//     letters form the 3-letter ISO code.
+//   - 0, n: index (byte offset) into altRegionISO3.
+const regionISO tag.Index = "" + // Size: 1312 bytes
+ "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" +
+ "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" +
+ "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" +
+ "CQ CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADO" +
+ "OMDYHYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSM" +
+ "FOROFQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQ" +
+ "NQGRRCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERL" +
+ "ILSRIMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM" +
+ "\x00\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSO" +
+ "LTTULUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNP" +
+ "MQTQMRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLD" +
+ "NOORNPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM" +
+ "\x00\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSS" +
+ "QTTTQU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLB" +
+ "SCYCSDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXM" +
+ "SYYRSZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTT" +
+ "TOTVUVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVN" +
+ "NMVUUTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXN" +
+ "NNXOOOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUG" +
+ "ZAAFZMMBZRARZWWEZZZZ\xff\xff\xff\xff"
+
+// altRegionISO3 holds a list of 3-letter region codes that cannot be
+// mapped to 2-letter codes using the default algorithm. This is a short list.
+const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN"
+
+// altRegionIDs holds a list of regionIDs the positions of which match those
+// of the 3-letter ISO codes in altRegionISO3.
+// Size: 22 bytes, 11 elements
+var altRegionIDs = [11]uint16{
+ 0x0058, 0x0071, 0x0089, 0x00a9, 0x00ab, 0x00ae, 0x00eb, 0x0106,
+ 0x0122, 0x0160, 0x00dd,
+}
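+
+// regionISO3Code is an illustrative sketch (not part of the generated
+// data): it reconstructs the 3-letter ISO code for element i of regionISO
+// (whose regionID is i+isoRegionOffset) following the two cases described
+// above; space-padded elements such as "CQ  " have no 3-letter code.
+func regionISO3Code(i int) string {
+ e := regionISO[i*4 : i*4+4]
+ switch {
+ case e[2] == 0: // 0, n: n is a byte offset into altRegionISO3
+  n := e[3]
+  return string(altRegionISO3[n : n+3])
+ case e[2] == ' ': // no 3-letter code assigned
+  return ""
+ }
+ return string(e[0:1]) + string(e[2:4]) // e.g. "ADND" -> "AND"
+}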
+
+// Size: 80 bytes, 20 elements
+var regionOldMap = [20]FromTo{
+ 0: {From: 0x44, To: 0xc5},
+ 1: {From: 0x59, To: 0xa8},
+ 2: {From: 0x60, To: 0x61},
+ 3: {From: 0x67, To: 0x3b},
+ 4: {From: 0x7a, To: 0x79},
+ 5: {From: 0x94, To: 0x37},
+ 6: {From: 0xa4, To: 0x134},
+ 7: {From: 0xc2, To: 0x134},
+ 8: {From: 0xd8, To: 0x140},
+ 9: {From: 0xdd, To: 0x2b},
+ 10: {From: 0xf0, To: 0x134},
+ 11: {From: 0xf3, To: 0xe3},
+ 12: {From: 0xfd, To: 0x71},
+ 13: {From: 0x104, To: 0x165},
+ 14: {From: 0x12b, To: 0x127},
+ 15: {From: 0x133, To: 0x7c},
+ 16: {From: 0x13b, To: 0x13f},
+ 17: {From: 0x142, To: 0x134},
+ 18: {From: 0x15e, To: 0x15f},
+ 19: {From: 0x164, To: 0x4b},
+}
+
+// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are
+// codes indicating collections of regions.
+// Size: 718 bytes, 359 elements
+var m49 = [359]int16{
+ // Entry 0 - 3F
+ 0, 1, 2, 3, 5, 9, 11, 13,
+ 14, 15, 17, 18, 19, 21, 29, 30,
+ 34, 35, 39, 53, 54, 57, 61, 142,
+ 143, 145, 150, 151, 154, 155, 202, 419,
+ 958, 0, 20, 784, 4, 28, 660, 8,
+ 51, 530, 24, 10, 32, 16, 40, 36,
+ 533, 248, 31, 70, 52, 50, 56, 854,
+ 100, 48, 108, 204, 652, 60, 96, 68,
+ // Entry 40 - 7F
+ 535, 76, 44, 64, 104, 74, 72, 112,
+ 84, 124, 166, 180, 140, 178, 756, 384,
+ 184, 152, 120, 156, 170, 0, 0, 188,
+ 891, 296, 192, 132, 531, 162, 196, 203,
+ 278, 276, 0, 262, 208, 212, 214, 204,
+ 12, 0, 218, 233, 818, 732, 232, 724,
+ 231, 967, 0, 246, 242, 238, 583, 234,
+ 0, 250, 249, 266, 826, 308, 268, 254,
+ // Entry 80 - BF
+ 831, 288, 292, 304, 270, 324, 312, 226,
+ 300, 239, 320, 316, 624, 328, 344, 334,
+ 340, 191, 332, 348, 854, 0, 360, 372,
+ 376, 833, 356, 86, 368, 364, 352, 380,
+ 832, 388, 400, 392, 581, 404, 417, 116,
+ 296, 174, 659, 408, 410, 414, 136, 398,
+ 418, 422, 662, 438, 144, 430, 426, 440,
+ 442, 428, 434, 504, 492, 498, 499, 663,
+ // Entry C0 - FF
+ 450, 584, 581, 807, 466, 104, 496, 446,
+ 580, 474, 478, 500, 470, 480, 462, 454,
+ 484, 458, 508, 516, 540, 562, 574, 566,
+ 548, 558, 528, 578, 524, 10, 520, 536,
+ 570, 554, 512, 591, 0, 604, 258, 598,
+ 608, 586, 616, 666, 612, 630, 275, 620,
+ 581, 585, 600, 591, 634, 959, 960, 961,
+ 962, 963, 964, 965, 966, 967, 968, 969,
+ // Entry 100 - 13F
+ 970, 971, 972, 638, 716, 642, 688, 643,
+ 646, 682, 90, 690, 729, 752, 702, 654,
+ 705, 744, 703, 694, 674, 686, 706, 740,
+ 728, 678, 810, 222, 534, 760, 748, 0,
+ 796, 148, 260, 768, 764, 762, 772, 626,
+ 795, 788, 776, 626, 792, 780, 798, 158,
+ 834, 804, 800, 826, 581, 0, 840, 858,
+ 860, 336, 670, 704, 862, 92, 850, 704,
+ // Entry 140 - 17F
+ 548, 876, 581, 882, 973, 974, 975, 976,
+ 977, 978, 979, 980, 981, 982, 983, 984,
+ 985, 986, 987, 988, 989, 990, 991, 992,
+ 993, 994, 995, 996, 997, 998, 720, 887,
+ 175, 891, 710, 894, 180, 716, 999,
+}
+
+// m49Index gives indexes into fromM49 based on the three most significant bits
+// of a 10-bit UN.M49 code. To search for a UN.M49 code in fromM49, search in
+//
+//	fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
+//
+// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
+// The region code is stored in the 9 lsb of the indexed value (see the
+// illustrative sketch after fromM49 below).
+// Size: 18 bytes, 9 elements
+var m49Index = [9]int16{
+ 0, 59, 108, 143, 181, 220, 259, 291,
+ 333,
+}
+
+// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.
+// Size: 666 bytes, 333 elements
+var fromM49 = [333]uint16{
+ // Entry 0 - 3F
+ 0x0201, 0x0402, 0x0603, 0x0824, 0x0a04, 0x1027, 0x1205, 0x142b,
+ 0x1606, 0x1868, 0x1a07, 0x1c08, 0x1e09, 0x202d, 0x220a, 0x240b,
+ 0x260c, 0x2822, 0x2a0d, 0x302a, 0x3825, 0x3a0e, 0x3c0f, 0x3e32,
+ 0x402c, 0x4410, 0x4611, 0x482f, 0x4e12, 0x502e, 0x5842, 0x6039,
+ 0x6435, 0x6628, 0x6834, 0x6a13, 0x6c14, 0x7036, 0x7215, 0x783d,
+ 0x7a16, 0x8043, 0x883f, 0x8c33, 0x9046, 0x9445, 0x9841, 0xa848,
+ 0xac9b, 0xb50a, 0xb93d, 0xc03e, 0xc838, 0xd0c5, 0xd83a, 0xe047,
+ 0xe8a7, 0xf052, 0xf849, 0x085b, 0x10ae, 0x184c, 0x1c17, 0x1e18,
+ // Entry 40 - 7F
+ 0x20b4, 0x2219, 0x2921, 0x2c1a, 0x2e1b, 0x3051, 0x341c, 0x361d,
+ 0x3853, 0x3d2f, 0x445d, 0x4c4a, 0x5454, 0x5ca9, 0x5f60, 0x644d,
+ 0x684b, 0x7050, 0x7857, 0x7e91, 0x805a, 0x885e, 0x941e, 0x965f,
+ 0x983b, 0xa064, 0xa865, 0xac66, 0xb46a, 0xbd1b, 0xc487, 0xcc70,
+ 0xce70, 0xd06e, 0xd26b, 0xd477, 0xdc75, 0xde89, 0xe474, 0xec73,
+ 0xf031, 0xf27a, 0xf479, 0xfc7f, 0x04e6, 0x0922, 0x0c63, 0x147b,
+ 0x187e, 0x1c84, 0x26ee, 0x2861, 0x2c60, 0x3061, 0x4081, 0x4882,
+ 0x50a8, 0x5888, 0x6083, 0x687d, 0x7086, 0x788b, 0x808a, 0x8885,
+ // Entry 80 - BF
+ 0x908d, 0x9892, 0x9c8f, 0xa139, 0xa890, 0xb08e, 0xb893, 0xc09e,
+ 0xc89a, 0xd096, 0xd89d, 0xe09c, 0xe897, 0xf098, 0xf89f, 0x004f,
+ 0x08a1, 0x10a3, 0x1caf, 0x20a2, 0x28a5, 0x30ab, 0x34ac, 0x3cad,
+ 0x42a6, 0x44b0, 0x461f, 0x4cb1, 0x54b6, 0x58b9, 0x5cb5, 0x64ba,
+ 0x6cb3, 0x70b7, 0x74b8, 0x7cc7, 0x84c0, 0x8ccf, 0x94d1, 0x9cce,
+ 0xa4c4, 0xaccc, 0xb4c9, 0xbcca, 0xc0cd, 0xc8d0, 0xd8bc, 0xe0c6,
+ 0xe4bd, 0xe6be, 0xe8cb, 0xf0bb, 0xf8d2, 0x00e2, 0x08d3, 0x10de,
+ 0x18dc, 0x20da, 0x2429, 0x265c, 0x2a30, 0x2d1c, 0x2e40, 0x30df,
+ // Entry C0 - FF
+ 0x38d4, 0x4940, 0x54e1, 0x5cd9, 0x64d5, 0x6cd7, 0x74e0, 0x7cd6,
+ 0x84db, 0x88c8, 0x8b34, 0x8e76, 0x90c1, 0x92f1, 0x94e9, 0x9ee3,
+ 0xace7, 0xb0f2, 0xb8e5, 0xc0e8, 0xc8ec, 0xd0ea, 0xd8ef, 0xe08c,
+ 0xe527, 0xeced, 0xf4f4, 0xfd03, 0x0505, 0x0707, 0x0d08, 0x183c,
+ 0x1d0f, 0x26aa, 0x2826, 0x2cb2, 0x2ebf, 0x34eb, 0x3d3a, 0x4514,
+ 0x4d19, 0x5509, 0x5d15, 0x6106, 0x650b, 0x6d13, 0x7d0e, 0x7f12,
+ 0x813f, 0x8310, 0x8516, 0x8d62, 0x9965, 0xa15e, 0xa86f, 0xb118,
+ 0xb30c, 0xb86d, 0xc10c, 0xc917, 0xd111, 0xd91e, 0xe10d, 0xe84e,
+ // Entry 100 - 13F
+ 0xf11d, 0xf525, 0xf924, 0x0123, 0x0926, 0x112a, 0x192d, 0x2023,
+ 0x2929, 0x312c, 0x3728, 0x3920, 0x3d2e, 0x4132, 0x4931, 0x4ec3,
+ 0x551a, 0x646c, 0x747c, 0x7e80, 0x80a0, 0x8299, 0x8530, 0x9136,
+ 0xa53e, 0xac37, 0xb537, 0xb938, 0xbd3c, 0xd941, 0xe543, 0xed5f,
+ 0xef5f, 0xf658, 0xfd63, 0x7c20, 0x7ef5, 0x80f6, 0x82f7, 0x84f8,
+ 0x86f9, 0x88fa, 0x8afb, 0x8cfc, 0x8e71, 0x90fe, 0x92ff, 0x9500,
+ 0x9701, 0x9902, 0x9b44, 0x9d45, 0x9f46, 0xa147, 0xa348, 0xa549,
+ 0xa74a, 0xa94b, 0xab4c, 0xad4d, 0xaf4e, 0xb14f, 0xb350, 0xb551,
+ // Entry 140 - 17F
+ 0xb752, 0xb953, 0xbb54, 0xbd55, 0xbf56, 0xc157, 0xc358, 0xc559,
+ 0xc75a, 0xc95b, 0xcb5c, 0xcd5d, 0xcf66,
+}
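+
+// regionFromM49 is an illustrative sketch (not part of the generated data)
+// of the search described on m49Index: for a code below 1000, the top
+// three bits select a slice of fromM49, whose entries carry the code's low
+// 7 bits in their upper 7 bits and the regionID in their lower 9 bits.
+// For example, regionFromM49(419) should yield 31, i.e. _419.
+func regionFromM49(code uint16) (regionID uint16, ok bool) {
+ msb3 := code >> 7
+ for _, e := range fromM49[m49Index[msb3]:m49Index[msb3+1]] {
+  if e>>9 == code&0x7f {
+   return e & 0x1ff, true
+  }
+ }
+ return 0, false
+}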
+
+// Size: 2128 bytes
+var variantIndex = map[string]uint8{
+ "1606nict": 0x0,
+ "1694acad": 0x1,
+ "1901": 0x2,
+ "1959acad": 0x3,
+ "1994": 0x67,
+ "1996": 0x4,
+ "abl1943": 0x5,
+ "akuapem": 0x6,
+ "alalc97": 0x69,
+ "aluku": 0x7,
+ "ao1990": 0x8,
+ "aranes": 0x9,
+ "arevela": 0xa,
+ "arevmda": 0xb,
+ "arkaika": 0xc,
+ "asante": 0xd,
+ "auvern": 0xe,
+ "baku1926": 0xf,
+ "balanka": 0x10,
+ "barla": 0x11,
+ "basiceng": 0x12,
+ "bauddha": 0x13,
+ "bciav": 0x14,
+ "bcizbl": 0x15,
+ "biscayan": 0x16,
+ "biske": 0x62,
+ "bohoric": 0x17,
+ "boont": 0x18,
+ "bornholm": 0x19,
+ "cisaup": 0x1a,
+ "colb1945": 0x1b,
+ "cornu": 0x1c,
+ "creiss": 0x1d,
+ "dajnko": 0x1e,
+ "ekavsk": 0x1f,
+ "emodeng": 0x20,
+ "fonipa": 0x6a,
+ "fonkirsh": 0x6b,
+ "fonnapa": 0x6c,
+ "fonupa": 0x6d,
+ "fonxsamp": 0x6e,
+ "gallo": 0x21,
+ "gascon": 0x22,
+ "grclass": 0x23,
+ "grital": 0x24,
+ "grmistr": 0x25,
+ "hepburn": 0x26,
+ "heploc": 0x68,
+ "hognorsk": 0x27,
+ "hsistemo": 0x28,
+ "ijekavsk": 0x29,
+ "itihasa": 0x2a,
+ "ivanchov": 0x2b,
+ "jauer": 0x2c,
+ "jyutping": 0x2d,
+ "kkcor": 0x2e,
+ "kociewie": 0x2f,
+ "kscor": 0x30,
+ "laukika": 0x31,
+ "lemosin": 0x32,
+ "lengadoc": 0x33,
+ "lipaw": 0x63,
+ "ltg1929": 0x34,
+ "ltg2007": 0x35,
+ "luna1918": 0x36,
+ "metelko": 0x37,
+ "monoton": 0x38,
+ "ndyuka": 0x39,
+ "nedis": 0x3a,
+ "newfound": 0x3b,
+ "nicard": 0x3c,
+ "njiva": 0x64,
+ "nulik": 0x3d,
+ "osojs": 0x65,
+ "oxendict": 0x3e,
+ "pahawh2": 0x3f,
+ "pahawh3": 0x40,
+ "pahawh4": 0x41,
+ "pamaka": 0x42,
+ "peano": 0x43,
+ "petr1708": 0x44,
+ "pinyin": 0x45,
+ "polyton": 0x46,
+ "provenc": 0x47,
+ "puter": 0x48,
+ "rigik": 0x49,
+ "rozaj": 0x4a,
+ "rumgr": 0x4b,
+ "scotland": 0x4c,
+ "scouse": 0x4d,
+ "simple": 0x6f,
+ "solba": 0x66,
+ "sotav": 0x4e,
+ "spanglis": 0x4f,
+ "surmiran": 0x50,
+ "sursilv": 0x51,
+ "sutsilv": 0x52,
+ "synnejyl": 0x53,
+ "tarask": 0x54,
+ "tongyong": 0x55,
+ "tunumiit": 0x56,
+ "uccor": 0x57,
+ "ucrcor": 0x58,
+ "ulster": 0x59,
+ "unifon": 0x5a,
+ "vaidika": 0x5b,
+ "valencia": 0x5c,
+ "vallader": 0x5d,
+ "vecdruka": 0x5e,
+ "vivaraup": 0x5f,
+ "wadegile": 0x60,
+ "xsistemo": 0x61,
+}
+
+// variantNumSpecialized is the number of specialized variants in variants.
+const variantNumSpecialized = 105
+
+// nRegionGroups is the number of region groups.
+const nRegionGroups = 33
+
+type likelyLangRegion struct {
+ lang uint16
+ region uint16
+}
+
+// likelyScript is a lookup table, indexed by scriptID, for the most likely
+// languages and regions given a script.
+// Size: 1052 bytes, 263 elements
+var likelyScript = [263]likelyLangRegion{
+ 1: {lang: 0x14e, region: 0x85},
+ 3: {lang: 0x2a2, region: 0x107},
+ 4: {lang: 0x1f, region: 0x9a},
+ 5: {lang: 0x3a, region: 0x6c},
+ 7: {lang: 0x3b, region: 0x9d},
+ 8: {lang: 0x1d7, region: 0x28},
+ 9: {lang: 0x13, region: 0x9d},
+ 10: {lang: 0x5b, region: 0x96},
+ 11: {lang: 0x60, region: 0x52},
+ 12: {lang: 0xb9, region: 0xb5},
+ 13: {lang: 0x63, region: 0x96},
+ 14: {lang: 0xa5, region: 0x35},
+ 15: {lang: 0x3e9, region: 0x9a},
+ 17: {lang: 0x529, region: 0x12f},
+ 18: {lang: 0x3b1, region: 0x9a},
+ 19: {lang: 0x15e, region: 0x79},
+ 20: {lang: 0xc2, region: 0x96},
+ 21: {lang: 0x9d, region: 0xe8},
+ 22: {lang: 0xdb, region: 0x35},
+ 23: {lang: 0xf3, region: 0x49},
+ 24: {lang: 0x4f0, region: 0x12c},
+ 25: {lang: 0xe7, region: 0x13f},
+ 26: {lang: 0xe5, region: 0x136},
+ 29: {lang: 0xf1, region: 0x6c},
+ 31: {lang: 0x1a0, region: 0x5e},
+ 32: {lang: 0x3e2, region: 0x107},
+ 34: {lang: 0x1be, region: 0x9a},
+ 38: {lang: 0x15e, region: 0x79},
+ 41: {lang: 0x133, region: 0x6c},
+ 42: {lang: 0x431, region: 0x27},
+ 44: {lang: 0x27, region: 0x70},
+ 46: {lang: 0x210, region: 0x7e},
+ 47: {lang: 0xfe, region: 0x38},
+ 49: {lang: 0x19b, region: 0x9a},
+ 50: {lang: 0x19e, region: 0x131},
+ 51: {lang: 0x3e9, region: 0x9a},
+ 52: {lang: 0x136, region: 0x88},
+ 53: {lang: 0x1a4, region: 0x9a},
+ 54: {lang: 0x39d, region: 0x9a},
+ 55: {lang: 0x529, region: 0x12f},
+ 56: {lang: 0x254, region: 0xac},
+ 57: {lang: 0x529, region: 0x53},
+ 58: {lang: 0x1cb, region: 0xe8},
+ 59: {lang: 0x529, region: 0x53},
+ 60: {lang: 0x529, region: 0x12f},
+ 61: {lang: 0x2fd, region: 0x9c},
+ 62: {lang: 0x1bc, region: 0x98},
+ 63: {lang: 0x200, region: 0xa3},
+ 64: {lang: 0x1c5, region: 0x12c},
+ 65: {lang: 0x1ca, region: 0xb0},
+ 68: {lang: 0x1d5, region: 0x93},
+ 70: {lang: 0x142, region: 0x9f},
+ 71: {lang: 0x254, region: 0xac},
+ 72: {lang: 0x20e, region: 0x96},
+ 73: {lang: 0x200, region: 0xa3},
+ 75: {lang: 0x135, region: 0xc5},
+ 76: {lang: 0x200, region: 0xa3},
+ 78: {lang: 0x3bb, region: 0xe9},
+ 79: {lang: 0x24a, region: 0xa7},
+ 80: {lang: 0x3fa, region: 0x9a},
+ 83: {lang: 0x251, region: 0x9a},
+ 84: {lang: 0x254, region: 0xac},
+ 86: {lang: 0x88, region: 0x9a},
+ 87: {lang: 0x370, region: 0x124},
+ 88: {lang: 0x2b8, region: 0xb0},
+ 93: {lang: 0x29f, region: 0x9a},
+ 94: {lang: 0x2a8, region: 0x9a},
+ 95: {lang: 0x28f, region: 0x88},
+ 96: {lang: 0x1a0, region: 0x88},
+ 97: {lang: 0x2ac, region: 0x53},
+ 99: {lang: 0x4f4, region: 0x12c},
+ 100: {lang: 0x4f5, region: 0x12c},
+ 101: {lang: 0x1be, region: 0x9a},
+ 103: {lang: 0x337, region: 0x9d},
+ 104: {lang: 0x4f7, region: 0x53},
+ 105: {lang: 0xa9, region: 0x53},
+ 108: {lang: 0x2e8, region: 0x113},
+ 109: {lang: 0x4f8, region: 0x10c},
+ 110: {lang: 0x4f8, region: 0x10c},
+ 111: {lang: 0x304, region: 0x9a},
+ 112: {lang: 0x31b, region: 0x9a},
+ 113: {lang: 0x30b, region: 0x53},
+ 115: {lang: 0x31e, region: 0x35},
+ 116: {lang: 0x30e, region: 0x9a},
+ 117: {lang: 0x414, region: 0xe9},
+ 118: {lang: 0x331, region: 0xc5},
+ 121: {lang: 0x4f9, region: 0x109},
+ 122: {lang: 0x3b, region: 0xa2},
+ 123: {lang: 0x353, region: 0xdc},
+ 126: {lang: 0x2d0, region: 0x85},
+ 127: {lang: 0x52a, region: 0x53},
+ 128: {lang: 0x403, region: 0x97},
+ 129: {lang: 0x3ee, region: 0x9a},
+ 130: {lang: 0x39b, region: 0xc6},
+ 131: {lang: 0x395, region: 0x9a},
+ 132: {lang: 0x399, region: 0x136},
+ 133: {lang: 0x429, region: 0x116},
+ 135: {lang: 0x3b, region: 0x11d},
+ 136: {lang: 0xfd, region: 0xc5},
+ 139: {lang: 0x27d, region: 0x107},
+ 140: {lang: 0x2c9, region: 0x53},
+ 141: {lang: 0x39f, region: 0x9d},
+ 142: {lang: 0x39f, region: 0x53},
+ 144: {lang: 0x3ad, region: 0xb1},
+ 146: {lang: 0x1c6, region: 0x53},
+ 147: {lang: 0x4fd, region: 0x9d},
+ 200: {lang: 0x3cb, region: 0x96},
+ 203: {lang: 0x372, region: 0x10d},
+ 204: {lang: 0x420, region: 0x98},
+ 206: {lang: 0x4ff, region: 0x15f},
+ 207: {lang: 0x3f0, region: 0x9a},
+ 208: {lang: 0x45, region: 0x136},
+ 209: {lang: 0x139, region: 0x7c},
+ 210: {lang: 0x3e9, region: 0x9a},
+ 212: {lang: 0x3e9, region: 0x9a},
+ 213: {lang: 0x3fa, region: 0x9a},
+ 214: {lang: 0x40c, region: 0xb4},
+ 217: {lang: 0x433, region: 0x9a},
+ 218: {lang: 0xef, region: 0xc6},
+ 219: {lang: 0x43e, region: 0x96},
+ 221: {lang: 0x44d, region: 0x35},
+ 222: {lang: 0x44e, region: 0x9c},
+ 226: {lang: 0x45a, region: 0xe8},
+ 227: {lang: 0x11a, region: 0x9a},
+ 228: {lang: 0x45e, region: 0x53},
+ 229: {lang: 0x232, region: 0x53},
+ 230: {lang: 0x450, region: 0x9a},
+ 231: {lang: 0x4a5, region: 0x53},
+ 232: {lang: 0x9f, region: 0x13f},
+ 233: {lang: 0x461, region: 0x9a},
+ 235: {lang: 0x528, region: 0xbb},
+ 236: {lang: 0x153, region: 0xe8},
+ 237: {lang: 0x128, region: 0xce},
+ 238: {lang: 0x46b, region: 0x124},
+ 239: {lang: 0xa9, region: 0x53},
+ 240: {lang: 0x2ce, region: 0x9a},
+ 243: {lang: 0x4ad, region: 0x11d},
+ 244: {lang: 0x4be, region: 0xb5},
+ 247: {lang: 0x1ce, region: 0x9a},
+ 250: {lang: 0x3a9, region: 0x9d},
+ 251: {lang: 0x22, region: 0x9c},
+ 253: {lang: 0x1ea, region: 0x53},
+ 254: {lang: 0xef, region: 0xc6},
+}
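+
+// likelyForScript is an illustrative sketch (not part of the generated
+// data): for a scriptID with a known mapping, the table gives the most
+// likely language and region; zero-valued entries mean no data.
+func likelyForScript(scriptID uint16) (likelyLangRegion, bool) {
+ e := likelyScript[scriptID]
+ return e, e.lang != 0
+}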
+
+type likelyScriptRegion struct {
+ region uint16
+ script uint16
+ flags uint8
+}
+
+// likelyLang is a lookup table, indexed by langID, for the most likely
+// scripts and regions given incomplete information. If more than one entry
+// exists for a given language, the region and script fields are the index
+// and size, respectively, of the list in likelyLangList.
+// Size: 7980 bytes, 1330 elements
+var likelyLang = [1330]likelyScriptRegion{
+ 0: {region: 0x136, script: 0x5b, flags: 0x0},
+ 1: {region: 0x70, script: 0x5b, flags: 0x0},
+ 2: {region: 0x166, script: 0x5b, flags: 0x0},
+ 3: {region: 0x166, script: 0x5b, flags: 0x0},
+ 4: {region: 0x166, script: 0x5b, flags: 0x0},
+ 5: {region: 0x7e, script: 0x20, flags: 0x0},
+ 6: {region: 0x166, script: 0x5b, flags: 0x0},
+ 7: {region: 0x166, script: 0x20, flags: 0x0},
+ 8: {region: 0x81, script: 0x5b, flags: 0x0},
+ 9: {region: 0x166, script: 0x5b, flags: 0x0},
+ 10: {region: 0x166, script: 0x5b, flags: 0x0},
+ 11: {region: 0x166, script: 0x5b, flags: 0x0},
+ 12: {region: 0x96, script: 0x5b, flags: 0x0},
+ 13: {region: 0x132, script: 0x5b, flags: 0x0},
+ 14: {region: 0x81, script: 0x5b, flags: 0x0},
+ 15: {region: 0x166, script: 0x5b, flags: 0x0},
+ 16: {region: 0x166, script: 0x5b, flags: 0x0},
+ 17: {region: 0x107, script: 0x20, flags: 0x0},
+ 18: {region: 0x166, script: 0x5b, flags: 0x0},
+ 19: {region: 0x9d, script: 0x9, flags: 0x0},
+ 20: {region: 0x129, script: 0x5, flags: 0x0},
+ 21: {region: 0x166, script: 0x5b, flags: 0x0},
+ 22: {region: 0x162, script: 0x5b, flags: 0x0},
+ 23: {region: 0x166, script: 0x5b, flags: 0x0},
+ 24: {region: 0x166, script: 0x5b, flags: 0x0},
+ 25: {region: 0x166, script: 0x5b, flags: 0x0},
+ 26: {region: 0x166, script: 0x5b, flags: 0x0},
+ 27: {region: 0x166, script: 0x5b, flags: 0x0},
+ 28: {region: 0x52, script: 0x5b, flags: 0x0},
+ 29: {region: 0x166, script: 0x5b, flags: 0x0},
+ 30: {region: 0x166, script: 0x5b, flags: 0x0},
+ 31: {region: 0x9a, script: 0x4, flags: 0x0},
+ 32: {region: 0x166, script: 0x5b, flags: 0x0},
+ 33: {region: 0x81, script: 0x5b, flags: 0x0},
+ 34: {region: 0x9c, script: 0xfb, flags: 0x0},
+ 35: {region: 0x166, script: 0x5b, flags: 0x0},
+ 36: {region: 0x166, script: 0x5b, flags: 0x0},
+ 37: {region: 0x14e, script: 0x5b, flags: 0x0},
+ 38: {region: 0x107, script: 0x20, flags: 0x0},
+ 39: {region: 0x70, script: 0x2c, flags: 0x0},
+ 40: {region: 0x166, script: 0x5b, flags: 0x0},
+ 41: {region: 0x166, script: 0x5b, flags: 0x0},
+ 42: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 43: {region: 0x166, script: 0x5b, flags: 0x0},
+ 45: {region: 0x166, script: 0x5b, flags: 0x0},
+ 46: {region: 0x166, script: 0x5b, flags: 0x0},
+ 47: {region: 0x166, script: 0x5b, flags: 0x0},
+ 48: {region: 0x166, script: 0x5b, flags: 0x0},
+ 49: {region: 0x166, script: 0x5b, flags: 0x0},
+ 50: {region: 0x166, script: 0x5b, flags: 0x0},
+ 51: {region: 0x96, script: 0x5b, flags: 0x0},
+ 52: {region: 0x166, script: 0x5, flags: 0x0},
+ 53: {region: 0x123, script: 0x5, flags: 0x0},
+ 54: {region: 0x166, script: 0x5b, flags: 0x0},
+ 55: {region: 0x166, script: 0x5b, flags: 0x0},
+ 56: {region: 0x166, script: 0x5b, flags: 0x0},
+ 57: {region: 0x166, script: 0x5b, flags: 0x0},
+ 58: {region: 0x6c, script: 0x5, flags: 0x0},
+ 59: {region: 0x0, script: 0x3, flags: 0x1},
+ 60: {region: 0x166, script: 0x5b, flags: 0x0},
+ 61: {region: 0x51, script: 0x5b, flags: 0x0},
+ 62: {region: 0x3f, script: 0x5b, flags: 0x0},
+ 63: {region: 0x68, script: 0x5, flags: 0x0},
+ 65: {region: 0xbb, script: 0x5, flags: 0x0},
+ 66: {region: 0x6c, script: 0x5, flags: 0x0},
+ 67: {region: 0x9a, script: 0xe, flags: 0x0},
+ 68: {region: 0x130, script: 0x5b, flags: 0x0},
+ 69: {region: 0x136, script: 0xd0, flags: 0x0},
+ 70: {region: 0x166, script: 0x5b, flags: 0x0},
+ 71: {region: 0x166, script: 0x5b, flags: 0x0},
+ 72: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 73: {region: 0x166, script: 0x5b, flags: 0x0},
+ 74: {region: 0x166, script: 0x5b, flags: 0x0},
+ 75: {region: 0x49, script: 0x5b, flags: 0x0},
+ 76: {region: 0x166, script: 0x5b, flags: 0x0},
+ 77: {region: 0x107, script: 0x20, flags: 0x0},
+ 78: {region: 0x166, script: 0x5, flags: 0x0},
+ 79: {region: 0x166, script: 0x5b, flags: 0x0},
+ 80: {region: 0x166, script: 0x5b, flags: 0x0},
+ 81: {region: 0x166, script: 0x5b, flags: 0x0},
+ 82: {region: 0x9a, script: 0x22, flags: 0x0},
+ 83: {region: 0x166, script: 0x5b, flags: 0x0},
+ 84: {region: 0x166, script: 0x5b, flags: 0x0},
+ 85: {region: 0x166, script: 0x5b, flags: 0x0},
+ 86: {region: 0x3f, script: 0x5b, flags: 0x0},
+ 87: {region: 0x166, script: 0x5b, flags: 0x0},
+ 88: {region: 0x3, script: 0x5, flags: 0x1},
+ 89: {region: 0x107, script: 0x20, flags: 0x0},
+ 90: {region: 0xe9, script: 0x5, flags: 0x0},
+ 91: {region: 0x96, script: 0x5b, flags: 0x0},
+ 92: {region: 0xdc, script: 0x22, flags: 0x0},
+ 93: {region: 0x2e, script: 0x5b, flags: 0x0},
+ 94: {region: 0x52, script: 0x5b, flags: 0x0},
+ 95: {region: 0x166, script: 0x5b, flags: 0x0},
+ 96: {region: 0x52, script: 0xb, flags: 0x0},
+ 97: {region: 0x166, script: 0x5b, flags: 0x0},
+ 98: {region: 0x166, script: 0x5b, flags: 0x0},
+ 99: {region: 0x96, script: 0x5b, flags: 0x0},
+ 100: {region: 0x166, script: 0x5b, flags: 0x0},
+ 101: {region: 0x52, script: 0x5b, flags: 0x0},
+ 102: {region: 0x166, script: 0x5b, flags: 0x0},
+ 103: {region: 0x166, script: 0x5b, flags: 0x0},
+ 104: {region: 0x166, script: 0x5b, flags: 0x0},
+ 105: {region: 0x166, script: 0x5b, flags: 0x0},
+ 106: {region: 0x4f, script: 0x5b, flags: 0x0},
+ 107: {region: 0x166, script: 0x5b, flags: 0x0},
+ 108: {region: 0x166, script: 0x5b, flags: 0x0},
+ 109: {region: 0x166, script: 0x5b, flags: 0x0},
+ 110: {region: 0x166, script: 0x2c, flags: 0x0},
+ 111: {region: 0x166, script: 0x5b, flags: 0x0},
+ 112: {region: 0x166, script: 0x5b, flags: 0x0},
+ 113: {region: 0x47, script: 0x20, flags: 0x0},
+ 114: {region: 0x166, script: 0x5b, flags: 0x0},
+ 115: {region: 0x166, script: 0x5b, flags: 0x0},
+ 116: {region: 0x10c, script: 0x5, flags: 0x0},
+ 117: {region: 0x163, script: 0x5b, flags: 0x0},
+ 118: {region: 0x166, script: 0x5b, flags: 0x0},
+ 119: {region: 0x96, script: 0x5b, flags: 0x0},
+ 120: {region: 0x166, script: 0x5b, flags: 0x0},
+ 121: {region: 0x130, script: 0x5b, flags: 0x0},
+ 122: {region: 0x52, script: 0x5b, flags: 0x0},
+ 123: {region: 0x9a, script: 0xe6, flags: 0x0},
+ 124: {region: 0xe9, script: 0x5, flags: 0x0},
+ 125: {region: 0x9a, script: 0x22, flags: 0x0},
+ 126: {region: 0x38, script: 0x20, flags: 0x0},
+ 127: {region: 0x9a, script: 0x22, flags: 0x0},
+ 128: {region: 0xe9, script: 0x5, flags: 0x0},
+ 129: {region: 0x12c, script: 0x34, flags: 0x0},
+ 131: {region: 0x9a, script: 0x22, flags: 0x0},
+ 132: {region: 0x166, script: 0x5b, flags: 0x0},
+ 133: {region: 0x9a, script: 0x22, flags: 0x0},
+ 134: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 135: {region: 0x166, script: 0x5b, flags: 0x0},
+ 136: {region: 0x9a, script: 0x22, flags: 0x0},
+ 137: {region: 0x166, script: 0x5b, flags: 0x0},
+ 138: {region: 0x140, script: 0x5b, flags: 0x0},
+ 139: {region: 0x166, script: 0x5b, flags: 0x0},
+ 140: {region: 0x166, script: 0x5b, flags: 0x0},
+ 141: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 142: {region: 0x166, script: 0x5b, flags: 0x0},
+ 143: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 144: {region: 0x166, script: 0x5b, flags: 0x0},
+ 145: {region: 0x166, script: 0x5b, flags: 0x0},
+ 146: {region: 0x166, script: 0x5b, flags: 0x0},
+ 147: {region: 0x166, script: 0x2c, flags: 0x0},
+ 148: {region: 0x9a, script: 0x22, flags: 0x0},
+ 149: {region: 0x96, script: 0x5b, flags: 0x0},
+ 150: {region: 0x166, script: 0x5b, flags: 0x0},
+ 151: {region: 0x166, script: 0x5b, flags: 0x0},
+ 152: {region: 0x115, script: 0x5b, flags: 0x0},
+ 153: {region: 0x166, script: 0x5b, flags: 0x0},
+ 154: {region: 0x166, script: 0x5b, flags: 0x0},
+ 155: {region: 0x52, script: 0x5b, flags: 0x0},
+ 156: {region: 0x166, script: 0x5b, flags: 0x0},
+ 157: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 158: {region: 0x166, script: 0x5b, flags: 0x0},
+ 159: {region: 0x13f, script: 0xe8, flags: 0x0},
+ 160: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 161: {region: 0x166, script: 0x5b, flags: 0x0},
+ 162: {region: 0x166, script: 0x5b, flags: 0x0},
+ 163: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 164: {region: 0x166, script: 0x5b, flags: 0x0},
+ 165: {region: 0x35, script: 0xe, flags: 0x0},
+ 166: {region: 0x166, script: 0x5b, flags: 0x0},
+ 167: {region: 0x166, script: 0x5b, flags: 0x0},
+ 168: {region: 0x166, script: 0x5b, flags: 0x0},
+ 169: {region: 0x53, script: 0xef, flags: 0x0},
+ 170: {region: 0x166, script: 0x5b, flags: 0x0},
+ 171: {region: 0x166, script: 0x5b, flags: 0x0},
+ 172: {region: 0x166, script: 0x5b, flags: 0x0},
+ 173: {region: 0x9a, script: 0xe, flags: 0x0},
+ 174: {region: 0x166, script: 0x5b, flags: 0x0},
+ 175: {region: 0x9d, script: 0x5, flags: 0x0},
+ 176: {region: 0x166, script: 0x5b, flags: 0x0},
+ 177: {region: 0x4f, script: 0x5b, flags: 0x0},
+ 178: {region: 0x79, script: 0x5b, flags: 0x0},
+ 179: {region: 0x9a, script: 0x22, flags: 0x0},
+ 180: {region: 0xe9, script: 0x5, flags: 0x0},
+ 181: {region: 0x9a, script: 0x22, flags: 0x0},
+ 182: {region: 0x166, script: 0x5b, flags: 0x0},
+ 183: {region: 0x33, script: 0x5b, flags: 0x0},
+ 184: {region: 0x166, script: 0x5b, flags: 0x0},
+ 185: {region: 0xb5, script: 0xc, flags: 0x0},
+ 186: {region: 0x52, script: 0x5b, flags: 0x0},
+ 187: {region: 0x166, script: 0x2c, flags: 0x0},
+ 188: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 189: {region: 0x166, script: 0x5b, flags: 0x0},
+ 190: {region: 0xe9, script: 0x22, flags: 0x0},
+ 191: {region: 0x107, script: 0x20, flags: 0x0},
+ 192: {region: 0x160, script: 0x5b, flags: 0x0},
+ 193: {region: 0x166, script: 0x5b, flags: 0x0},
+ 194: {region: 0x96, script: 0x5b, flags: 0x0},
+ 195: {region: 0x166, script: 0x5b, flags: 0x0},
+ 196: {region: 0x52, script: 0x5b, flags: 0x0},
+ 197: {region: 0x166, script: 0x5b, flags: 0x0},
+ 198: {region: 0x166, script: 0x5b, flags: 0x0},
+ 199: {region: 0x166, script: 0x5b, flags: 0x0},
+ 200: {region: 0x87, script: 0x5b, flags: 0x0},
+ 201: {region: 0x166, script: 0x5b, flags: 0x0},
+ 202: {region: 0x166, script: 0x5b, flags: 0x0},
+ 203: {region: 0x166, script: 0x5b, flags: 0x0},
+ 204: {region: 0x166, script: 0x5b, flags: 0x0},
+ 205: {region: 0x6e, script: 0x2c, flags: 0x0},
+ 206: {region: 0x166, script: 0x5b, flags: 0x0},
+ 207: {region: 0x166, script: 0x5b, flags: 0x0},
+ 208: {region: 0x52, script: 0x5b, flags: 0x0},
+ 209: {region: 0x166, script: 0x5b, flags: 0x0},
+ 210: {region: 0x166, script: 0x5b, flags: 0x0},
+ 211: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 212: {region: 0x166, script: 0x5b, flags: 0x0},
+ 213: {region: 0x166, script: 0x5b, flags: 0x0},
+ 214: {region: 0x166, script: 0x5b, flags: 0x0},
+ 215: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 216: {region: 0x166, script: 0x5b, flags: 0x0},
+ 217: {region: 0x166, script: 0x5b, flags: 0x0},
+ 218: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 219: {region: 0x35, script: 0x16, flags: 0x0},
+ 220: {region: 0x107, script: 0x20, flags: 0x0},
+ 221: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 222: {region: 0x166, script: 0x5b, flags: 0x0},
+ 223: {region: 0x132, script: 0x5b, flags: 0x0},
+ 224: {region: 0x8b, script: 0x5b, flags: 0x0},
+ 225: {region: 0x76, script: 0x5b, flags: 0x0},
+ 226: {region: 0x107, script: 0x20, flags: 0x0},
+ 227: {region: 0x136, script: 0x5b, flags: 0x0},
+ 228: {region: 0x49, script: 0x5b, flags: 0x0},
+ 229: {region: 0x136, script: 0x1a, flags: 0x0},
+ 230: {region: 0xa7, script: 0x5, flags: 0x0},
+ 231: {region: 0x13f, script: 0x19, flags: 0x0},
+ 232: {region: 0x166, script: 0x5b, flags: 0x0},
+ 233: {region: 0x9c, script: 0x5, flags: 0x0},
+ 234: {region: 0x166, script: 0x5b, flags: 0x0},
+ 235: {region: 0x166, script: 0x5b, flags: 0x0},
+ 236: {region: 0x166, script: 0x5b, flags: 0x0},
+ 237: {region: 0x166, script: 0x5b, flags: 0x0},
+ 238: {region: 0x166, script: 0x5b, flags: 0x0},
+ 239: {region: 0xc6, script: 0xda, flags: 0x0},
+ 240: {region: 0x79, script: 0x5b, flags: 0x0},
+ 241: {region: 0x6c, script: 0x1d, flags: 0x0},
+ 242: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 243: {region: 0x49, script: 0x17, flags: 0x0},
+ 244: {region: 0x131, script: 0x20, flags: 0x0},
+ 245: {region: 0x49, script: 0x17, flags: 0x0},
+ 246: {region: 0x49, script: 0x17, flags: 0x0},
+ 247: {region: 0x49, script: 0x17, flags: 0x0},
+ 248: {region: 0x49, script: 0x17, flags: 0x0},
+ 249: {region: 0x10b, script: 0x5b, flags: 0x0},
+ 250: {region: 0x5f, script: 0x5b, flags: 0x0},
+ 251: {region: 0xea, script: 0x5b, flags: 0x0},
+ 252: {region: 0x49, script: 0x17, flags: 0x0},
+ 253: {region: 0xc5, script: 0x88, flags: 0x0},
+ 254: {region: 0x8, script: 0x2, flags: 0x1},
+ 255: {region: 0x107, script: 0x20, flags: 0x0},
+ 256: {region: 0x7c, script: 0x5b, flags: 0x0},
+ 257: {region: 0x64, script: 0x5b, flags: 0x0},
+ 258: {region: 0x166, script: 0x5b, flags: 0x0},
+ 259: {region: 0x166, script: 0x5b, flags: 0x0},
+ 260: {region: 0x166, script: 0x5b, flags: 0x0},
+ 261: {region: 0x166, script: 0x5b, flags: 0x0},
+ 262: {region: 0x136, script: 0x5b, flags: 0x0},
+ 263: {region: 0x107, script: 0x20, flags: 0x0},
+ 264: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 265: {region: 0x166, script: 0x5b, flags: 0x0},
+ 266: {region: 0x166, script: 0x5b, flags: 0x0},
+ 267: {region: 0x9a, script: 0x5, flags: 0x0},
+ 268: {region: 0x166, script: 0x5b, flags: 0x0},
+ 269: {region: 0x61, script: 0x5b, flags: 0x0},
+ 270: {region: 0x166, script: 0x5b, flags: 0x0},
+ 271: {region: 0x49, script: 0x5b, flags: 0x0},
+ 272: {region: 0x166, script: 0x5b, flags: 0x0},
+ 273: {region: 0x166, script: 0x5b, flags: 0x0},
+ 274: {region: 0x166, script: 0x5b, flags: 0x0},
+ 275: {region: 0x166, script: 0x5, flags: 0x0},
+ 276: {region: 0x49, script: 0x5b, flags: 0x0},
+ 277: {region: 0x166, script: 0x5b, flags: 0x0},
+ 278: {region: 0x166, script: 0x5b, flags: 0x0},
+ 279: {region: 0xd5, script: 0x5b, flags: 0x0},
+ 280: {region: 0x4f, script: 0x5b, flags: 0x0},
+ 281: {region: 0x166, script: 0x5b, flags: 0x0},
+ 282: {region: 0x9a, script: 0x5, flags: 0x0},
+ 283: {region: 0x166, script: 0x5b, flags: 0x0},
+ 284: {region: 0x166, script: 0x5b, flags: 0x0},
+ 285: {region: 0x166, script: 0x5b, flags: 0x0},
+ 286: {region: 0x166, script: 0x2c, flags: 0x0},
+ 287: {region: 0x61, script: 0x5b, flags: 0x0},
+ 288: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 289: {region: 0xd1, script: 0x5b, flags: 0x0},
+ 290: {region: 0x166, script: 0x5b, flags: 0x0},
+ 291: {region: 0xdc, script: 0x22, flags: 0x0},
+ 292: {region: 0x52, script: 0x5b, flags: 0x0},
+ 293: {region: 0x166, script: 0x5b, flags: 0x0},
+ 294: {region: 0x166, script: 0x5b, flags: 0x0},
+ 295: {region: 0x166, script: 0x5b, flags: 0x0},
+ 296: {region: 0xce, script: 0xed, flags: 0x0},
+ 297: {region: 0x166, script: 0x5b, flags: 0x0},
+ 298: {region: 0x166, script: 0x5b, flags: 0x0},
+ 299: {region: 0x115, script: 0x5b, flags: 0x0},
+ 300: {region: 0x37, script: 0x5b, flags: 0x0},
+ 301: {region: 0x43, script: 0xef, flags: 0x0},
+ 302: {region: 0x166, script: 0x5b, flags: 0x0},
+ 303: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 304: {region: 0x81, script: 0x5b, flags: 0x0},
+ 305: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 306: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 307: {region: 0x6c, script: 0x29, flags: 0x0},
+ 308: {region: 0x166, script: 0x5b, flags: 0x0},
+ 309: {region: 0xc5, script: 0x4b, flags: 0x0},
+ 310: {region: 0x88, script: 0x34, flags: 0x0},
+ 311: {region: 0x166, script: 0x5b, flags: 0x0},
+ 312: {region: 0x166, script: 0x5b, flags: 0x0},
+ 313: {region: 0xa, script: 0x2, flags: 0x1},
+ 314: {region: 0x166, script: 0x5b, flags: 0x0},
+ 315: {region: 0x166, script: 0x5b, flags: 0x0},
+ 316: {region: 0x1, script: 0x5b, flags: 0x0},
+ 317: {region: 0x166, script: 0x5b, flags: 0x0},
+ 318: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 319: {region: 0x136, script: 0x5b, flags: 0x0},
+ 320: {region: 0x6b, script: 0x5b, flags: 0x0},
+ 321: {region: 0x166, script: 0x5b, flags: 0x0},
+ 322: {region: 0x9f, script: 0x46, flags: 0x0},
+ 323: {region: 0x166, script: 0x5b, flags: 0x0},
+ 324: {region: 0x166, script: 0x5b, flags: 0x0},
+ 325: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 326: {region: 0x52, script: 0x5b, flags: 0x0},
+ 327: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 328: {region: 0x9d, script: 0x5, flags: 0x0},
+ 329: {region: 0x166, script: 0x5b, flags: 0x0},
+ 330: {region: 0x166, script: 0x5b, flags: 0x0},
+ 331: {region: 0x166, script: 0x5b, flags: 0x0},
+ 332: {region: 0x166, script: 0x5b, flags: 0x0},
+ 333: {region: 0x87, script: 0x5b, flags: 0x0},
+ 334: {region: 0xc, script: 0x2, flags: 0x1},
+ 335: {region: 0x166, script: 0x5b, flags: 0x0},
+ 336: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 337: {region: 0x73, script: 0x5b, flags: 0x0},
+ 338: {region: 0x10c, script: 0x5, flags: 0x0},
+ 339: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 340: {region: 0x10d, script: 0x5b, flags: 0x0},
+ 341: {region: 0x74, script: 0x5b, flags: 0x0},
+ 342: {region: 0x166, script: 0x5b, flags: 0x0},
+ 343: {region: 0x166, script: 0x5b, flags: 0x0},
+ 344: {region: 0x77, script: 0x5b, flags: 0x0},
+ 345: {region: 0x166, script: 0x5b, flags: 0x0},
+ 346: {region: 0x3b, script: 0x5b, flags: 0x0},
+ 347: {region: 0x166, script: 0x5b, flags: 0x0},
+ 348: {region: 0x166, script: 0x5b, flags: 0x0},
+ 349: {region: 0x166, script: 0x5b, flags: 0x0},
+ 350: {region: 0x79, script: 0x5b, flags: 0x0},
+ 351: {region: 0x136, script: 0x5b, flags: 0x0},
+ 352: {region: 0x79, script: 0x5b, flags: 0x0},
+ 353: {region: 0x61, script: 0x5b, flags: 0x0},
+ 354: {region: 0x61, script: 0x5b, flags: 0x0},
+ 355: {region: 0x52, script: 0x5, flags: 0x0},
+ 356: {region: 0x141, script: 0x5b, flags: 0x0},
+ 357: {region: 0x166, script: 0x5b, flags: 0x0},
+ 358: {region: 0x85, script: 0x5b, flags: 0x0},
+ 359: {region: 0x166, script: 0x5b, flags: 0x0},
+ 360: {region: 0xd5, script: 0x5b, flags: 0x0},
+ 361: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 362: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 363: {region: 0x166, script: 0x5b, flags: 0x0},
+ 364: {region: 0x10c, script: 0x5b, flags: 0x0},
+ 365: {region: 0xda, script: 0x5b, flags: 0x0},
+ 366: {region: 0x97, script: 0x5b, flags: 0x0},
+ 367: {region: 0x81, script: 0x5b, flags: 0x0},
+ 368: {region: 0x166, script: 0x5b, flags: 0x0},
+ 369: {region: 0xbd, script: 0x5b, flags: 0x0},
+ 370: {region: 0x166, script: 0x5b, flags: 0x0},
+ 371: {region: 0x166, script: 0x5b, flags: 0x0},
+ 372: {region: 0x166, script: 0x5b, flags: 0x0},
+ 373: {region: 0x53, script: 0x3b, flags: 0x0},
+ 374: {region: 0x166, script: 0x5b, flags: 0x0},
+ 375: {region: 0x96, script: 0x5b, flags: 0x0},
+ 376: {region: 0x166, script: 0x5b, flags: 0x0},
+ 377: {region: 0x166, script: 0x5b, flags: 0x0},
+ 378: {region: 0x9a, script: 0x22, flags: 0x0},
+ 379: {region: 0x166, script: 0x5b, flags: 0x0},
+ 380: {region: 0x9d, script: 0x5, flags: 0x0},
+ 381: {region: 0x7f, script: 0x5b, flags: 0x0},
+ 382: {region: 0x7c, script: 0x5b, flags: 0x0},
+ 383: {region: 0x166, script: 0x5b, flags: 0x0},
+ 384: {region: 0x166, script: 0x5b, flags: 0x0},
+ 385: {region: 0x166, script: 0x5b, flags: 0x0},
+ 386: {region: 0x166, script: 0x5b, flags: 0x0},
+ 387: {region: 0x166, script: 0x5b, flags: 0x0},
+ 388: {region: 0x166, script: 0x5b, flags: 0x0},
+ 389: {region: 0x70, script: 0x2c, flags: 0x0},
+ 390: {region: 0x166, script: 0x5b, flags: 0x0},
+ 391: {region: 0xdc, script: 0x22, flags: 0x0},
+ 392: {region: 0x166, script: 0x5b, flags: 0x0},
+ 393: {region: 0xa8, script: 0x5b, flags: 0x0},
+ 394: {region: 0x166, script: 0x5b, flags: 0x0},
+ 395: {region: 0xe9, script: 0x5, flags: 0x0},
+ 396: {region: 0x166, script: 0x5b, flags: 0x0},
+ 397: {region: 0xe9, script: 0x5, flags: 0x0},
+ 398: {region: 0x166, script: 0x5b, flags: 0x0},
+ 399: {region: 0x166, script: 0x5b, flags: 0x0},
+ 400: {region: 0x6f, script: 0x5b, flags: 0x0},
+ 401: {region: 0x9d, script: 0x5, flags: 0x0},
+ 402: {region: 0x166, script: 0x5b, flags: 0x0},
+ 403: {region: 0x166, script: 0x2c, flags: 0x0},
+ 404: {region: 0xf2, script: 0x5b, flags: 0x0},
+ 405: {region: 0x166, script: 0x5b, flags: 0x0},
+ 406: {region: 0x166, script: 0x5b, flags: 0x0},
+ 407: {region: 0x166, script: 0x5b, flags: 0x0},
+ 408: {region: 0x166, script: 0x2c, flags: 0x0},
+ 409: {region: 0x166, script: 0x5b, flags: 0x0},
+ 410: {region: 0x9a, script: 0x22, flags: 0x0},
+ 411: {region: 0x9a, script: 0xe9, flags: 0x0},
+ 412: {region: 0x96, script: 0x5b, flags: 0x0},
+ 413: {region: 0xda, script: 0x5b, flags: 0x0},
+ 414: {region: 0x131, script: 0x32, flags: 0x0},
+ 415: {region: 0x166, script: 0x5b, flags: 0x0},
+ 416: {region: 0xe, script: 0x2, flags: 0x1},
+ 417: {region: 0x9a, script: 0xe, flags: 0x0},
+ 418: {region: 0x166, script: 0x5b, flags: 0x0},
+ 419: {region: 0x4e, script: 0x5b, flags: 0x0},
+ 420: {region: 0x9a, script: 0x35, flags: 0x0},
+ 421: {region: 0x41, script: 0x5b, flags: 0x0},
+ 422: {region: 0x54, script: 0x5b, flags: 0x0},
+ 423: {region: 0x166, script: 0x5b, flags: 0x0},
+ 424: {region: 0x81, script: 0x5b, flags: 0x0},
+ 425: {region: 0x166, script: 0x5b, flags: 0x0},
+ 426: {region: 0x166, script: 0x5b, flags: 0x0},
+ 427: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 428: {region: 0x99, script: 0x5b, flags: 0x0},
+ 429: {region: 0x166, script: 0x5b, flags: 0x0},
+ 430: {region: 0xdc, script: 0x22, flags: 0x0},
+ 431: {region: 0x166, script: 0x5b, flags: 0x0},
+ 432: {region: 0x166, script: 0x5, flags: 0x0},
+ 433: {region: 0x49, script: 0x5b, flags: 0x0},
+ 434: {region: 0x166, script: 0x5, flags: 0x0},
+ 435: {region: 0x166, script: 0x5b, flags: 0x0},
+ 436: {region: 0x10, script: 0x3, flags: 0x1},
+ 437: {region: 0x166, script: 0x5b, flags: 0x0},
+ 438: {region: 0x53, script: 0x3b, flags: 0x0},
+ 439: {region: 0x166, script: 0x5b, flags: 0x0},
+ 440: {region: 0x136, script: 0x5b, flags: 0x0},
+ 441: {region: 0x24, script: 0x5, flags: 0x0},
+ 442: {region: 0x166, script: 0x5b, flags: 0x0},
+ 443: {region: 0x166, script: 0x2c, flags: 0x0},
+ 444: {region: 0x98, script: 0x3e, flags: 0x0},
+ 445: {region: 0x166, script: 0x5b, flags: 0x0},
+ 446: {region: 0x9a, script: 0x22, flags: 0x0},
+ 447: {region: 0x166, script: 0x5b, flags: 0x0},
+ 448: {region: 0x74, script: 0x5b, flags: 0x0},
+ 449: {region: 0x166, script: 0x5b, flags: 0x0},
+ 450: {region: 0x166, script: 0x5b, flags: 0x0},
+ 451: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 452: {region: 0x166, script: 0x5b, flags: 0x0},
+ 453: {region: 0x12c, script: 0x40, flags: 0x0},
+ 454: {region: 0x53, script: 0x92, flags: 0x0},
+ 455: {region: 0x166, script: 0x5b, flags: 0x0},
+ 456: {region: 0xe9, script: 0x5, flags: 0x0},
+ 457: {region: 0x9a, script: 0x22, flags: 0x0},
+ 458: {region: 0xb0, script: 0x41, flags: 0x0},
+ 459: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 460: {region: 0xe9, script: 0x5, flags: 0x0},
+ 461: {region: 0xe7, script: 0x5b, flags: 0x0},
+ 462: {region: 0x9a, script: 0x22, flags: 0x0},
+ 463: {region: 0x9a, script: 0x22, flags: 0x0},
+ 464: {region: 0x166, script: 0x5b, flags: 0x0},
+ 465: {region: 0x91, script: 0x5b, flags: 0x0},
+ 466: {region: 0x61, script: 0x5b, flags: 0x0},
+ 467: {region: 0x53, script: 0x3b, flags: 0x0},
+ 468: {region: 0x92, script: 0x5b, flags: 0x0},
+ 469: {region: 0x93, script: 0x5b, flags: 0x0},
+ 470: {region: 0x166, script: 0x5b, flags: 0x0},
+ 471: {region: 0x28, script: 0x8, flags: 0x0},
+ 472: {region: 0xd3, script: 0x5b, flags: 0x0},
+ 473: {region: 0x79, script: 0x5b, flags: 0x0},
+ 474: {region: 0x166, script: 0x5b, flags: 0x0},
+ 475: {region: 0x166, script: 0x5b, flags: 0x0},
+ 476: {region: 0xd1, script: 0x5b, flags: 0x0},
+ 477: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 478: {region: 0x166, script: 0x5b, flags: 0x0},
+ 479: {region: 0x166, script: 0x5b, flags: 0x0},
+ 480: {region: 0x166, script: 0x5b, flags: 0x0},
+ 481: {region: 0x96, script: 0x5b, flags: 0x0},
+ 482: {region: 0x166, script: 0x5b, flags: 0x0},
+ 483: {region: 0x166, script: 0x5b, flags: 0x0},
+ 484: {region: 0x166, script: 0x5b, flags: 0x0},
+ 486: {region: 0x123, script: 0x5b, flags: 0x0},
+ 487: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 488: {region: 0x166, script: 0x5b, flags: 0x0},
+ 489: {region: 0x166, script: 0x5b, flags: 0x0},
+ 490: {region: 0x53, script: 0xfd, flags: 0x0},
+ 491: {region: 0x166, script: 0x5b, flags: 0x0},
+ 492: {region: 0x136, script: 0x5b, flags: 0x0},
+ 493: {region: 0x166, script: 0x5b, flags: 0x0},
+ 494: {region: 0x49, script: 0x5b, flags: 0x0},
+ 495: {region: 0x166, script: 0x5b, flags: 0x0},
+ 496: {region: 0x166, script: 0x5b, flags: 0x0},
+ 497: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 498: {region: 0x166, script: 0x5b, flags: 0x0},
+ 499: {region: 0x96, script: 0x5b, flags: 0x0},
+ 500: {region: 0x107, script: 0x20, flags: 0x0},
+ 501: {region: 0x1, script: 0x5b, flags: 0x0},
+ 502: {region: 0x166, script: 0x5b, flags: 0x0},
+ 503: {region: 0x166, script: 0x5b, flags: 0x0},
+ 504: {region: 0x9e, script: 0x5b, flags: 0x0},
+ 505: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 506: {region: 0x49, script: 0x17, flags: 0x0},
+ 507: {region: 0x98, script: 0x3e, flags: 0x0},
+ 508: {region: 0x166, script: 0x5b, flags: 0x0},
+ 509: {region: 0x166, script: 0x5b, flags: 0x0},
+ 510: {region: 0x107, script: 0x5b, flags: 0x0},
+ 511: {region: 0x166, script: 0x5b, flags: 0x0},
+ 512: {region: 0xa3, script: 0x49, flags: 0x0},
+ 513: {region: 0x166, script: 0x5b, flags: 0x0},
+ 514: {region: 0xa1, script: 0x5b, flags: 0x0},
+ 515: {region: 0x1, script: 0x5b, flags: 0x0},
+ 516: {region: 0x166, script: 0x5b, flags: 0x0},
+ 517: {region: 0x166, script: 0x5b, flags: 0x0},
+ 518: {region: 0x166, script: 0x5b, flags: 0x0},
+ 519: {region: 0x52, script: 0x5b, flags: 0x0},
+ 520: {region: 0x131, script: 0x3e, flags: 0x0},
+ 521: {region: 0x166, script: 0x5b, flags: 0x0},
+ 522: {region: 0x130, script: 0x5b, flags: 0x0},
+ 523: {region: 0xdc, script: 0x22, flags: 0x0},
+ 524: {region: 0x166, script: 0x5b, flags: 0x0},
+ 525: {region: 0x64, script: 0x5b, flags: 0x0},
+ 526: {region: 0x96, script: 0x5b, flags: 0x0},
+ 527: {region: 0x96, script: 0x5b, flags: 0x0},
+ 528: {region: 0x7e, script: 0x2e, flags: 0x0},
+ 529: {region: 0x138, script: 0x20, flags: 0x0},
+ 530: {region: 0x68, script: 0x5b, flags: 0x0},
+ 531: {region: 0xc5, script: 0x5b, flags: 0x0},
+ 532: {region: 0x166, script: 0x5b, flags: 0x0},
+ 533: {region: 0x166, script: 0x5b, flags: 0x0},
+ 534: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 535: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 536: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 537: {region: 0x107, script: 0x20, flags: 0x0},
+ 538: {region: 0x166, script: 0x5b, flags: 0x0},
+ 539: {region: 0x166, script: 0x5b, flags: 0x0},
+ 540: {region: 0x166, script: 0x5b, flags: 0x0},
+ 541: {region: 0x166, script: 0x5b, flags: 0x0},
+ 542: {region: 0xd5, script: 0x5, flags: 0x0},
+ 543: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 544: {region: 0x165, script: 0x5b, flags: 0x0},
+ 545: {region: 0x166, script: 0x5b, flags: 0x0},
+ 546: {region: 0x166, script: 0x5b, flags: 0x0},
+ 547: {region: 0x130, script: 0x5b, flags: 0x0},
+ 548: {region: 0x123, script: 0x5, flags: 0x0},
+ 549: {region: 0x166, script: 0x5b, flags: 0x0},
+ 550: {region: 0x124, script: 0xee, flags: 0x0},
+ 551: {region: 0x5b, script: 0x5b, flags: 0x0},
+ 552: {region: 0x52, script: 0x5b, flags: 0x0},
+ 553: {region: 0x166, script: 0x5b, flags: 0x0},
+ 554: {region: 0x4f, script: 0x5b, flags: 0x0},
+ 555: {region: 0x9a, script: 0x22, flags: 0x0},
+ 556: {region: 0x9a, script: 0x22, flags: 0x0},
+ 557: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 558: {region: 0x96, script: 0x5b, flags: 0x0},
+ 559: {region: 0x166, script: 0x5b, flags: 0x0},
+ 560: {region: 0x41, script: 0x5b, flags: 0x0},
+ 561: {region: 0x9a, script: 0x5b, flags: 0x0},
+ 562: {region: 0x53, script: 0xe5, flags: 0x0},
+ 563: {region: 0x9a, script: 0x22, flags: 0x0},
+ 564: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 565: {region: 0x166, script: 0x5b, flags: 0x0},
+ 566: {region: 0x9a, script: 0x76, flags: 0x0},
+ 567: {region: 0xe9, script: 0x5, flags: 0x0},
+ 568: {region: 0x166, script: 0x5b, flags: 0x0},
+ 569: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 570: {region: 0x166, script: 0x5b, flags: 0x0},
+ 571: {region: 0x12c, script: 0x5b, flags: 0x0},
+ 572: {region: 0x166, script: 0x5b, flags: 0x0},
+ 573: {region: 0xd3, script: 0x5b, flags: 0x0},
+ 574: {region: 0x166, script: 0x5b, flags: 0x0},
+ 575: {region: 0xb0, script: 0x58, flags: 0x0},
+ 576: {region: 0x166, script: 0x5b, flags: 0x0},
+ 577: {region: 0x166, script: 0x5b, flags: 0x0},
+ 578: {region: 0x13, script: 0x6, flags: 0x1},
+ 579: {region: 0x166, script: 0x5b, flags: 0x0},
+ 580: {region: 0x52, script: 0x5b, flags: 0x0},
+ 581: {region: 0x83, script: 0x5b, flags: 0x0},
+ 582: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 583: {region: 0x166, script: 0x5b, flags: 0x0},
+ 584: {region: 0x166, script: 0x5b, flags: 0x0},
+ 585: {region: 0x166, script: 0x5b, flags: 0x0},
+ 586: {region: 0xa7, script: 0x4f, flags: 0x0},
+ 587: {region: 0x2a, script: 0x5b, flags: 0x0},
+ 588: {region: 0x166, script: 0x5b, flags: 0x0},
+ 589: {region: 0x166, script: 0x5b, flags: 0x0},
+ 590: {region: 0x166, script: 0x5b, flags: 0x0},
+ 591: {region: 0x166, script: 0x5b, flags: 0x0},
+ 592: {region: 0x166, script: 0x5b, flags: 0x0},
+ 593: {region: 0x9a, script: 0x53, flags: 0x0},
+ 594: {region: 0x8c, script: 0x5b, flags: 0x0},
+ 595: {region: 0x166, script: 0x5b, flags: 0x0},
+ 596: {region: 0xac, script: 0x54, flags: 0x0},
+ 597: {region: 0x107, script: 0x20, flags: 0x0},
+ 598: {region: 0x9a, script: 0x22, flags: 0x0},
+ 599: {region: 0x166, script: 0x5b, flags: 0x0},
+ 600: {region: 0x76, script: 0x5b, flags: 0x0},
+ 601: {region: 0x166, script: 0x5b, flags: 0x0},
+ 602: {region: 0xb5, script: 0x5b, flags: 0x0},
+ 603: {region: 0x166, script: 0x5b, flags: 0x0},
+ 604: {region: 0x166, script: 0x5b, flags: 0x0},
+ 605: {region: 0x166, script: 0x5b, flags: 0x0},
+ 606: {region: 0x166, script: 0x5b, flags: 0x0},
+ 607: {region: 0x166, script: 0x5b, flags: 0x0},
+ 608: {region: 0x166, script: 0x5b, flags: 0x0},
+ 609: {region: 0x166, script: 0x5b, flags: 0x0},
+ 610: {region: 0x166, script: 0x2c, flags: 0x0},
+ 611: {region: 0x166, script: 0x5b, flags: 0x0},
+ 612: {region: 0x107, script: 0x20, flags: 0x0},
+ 613: {region: 0x113, script: 0x5b, flags: 0x0},
+ 614: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 615: {region: 0x107, script: 0x5b, flags: 0x0},
+ 616: {region: 0x166, script: 0x5b, flags: 0x0},
+ 617: {region: 0x9a, script: 0x22, flags: 0x0},
+ 618: {region: 0x9a, script: 0x5, flags: 0x0},
+ 619: {region: 0x130, script: 0x5b, flags: 0x0},
+ 620: {region: 0x166, script: 0x5b, flags: 0x0},
+ 621: {region: 0x52, script: 0x5b, flags: 0x0},
+ 622: {region: 0x61, script: 0x5b, flags: 0x0},
+ 623: {region: 0x166, script: 0x5b, flags: 0x0},
+ 624: {region: 0x166, script: 0x5b, flags: 0x0},
+ 625: {region: 0x166, script: 0x2c, flags: 0x0},
+ 626: {region: 0x166, script: 0x5b, flags: 0x0},
+ 627: {region: 0x166, script: 0x5b, flags: 0x0},
+ 628: {region: 0x19, script: 0x3, flags: 0x1},
+ 629: {region: 0x166, script: 0x5b, flags: 0x0},
+ 630: {region: 0x166, script: 0x5b, flags: 0x0},
+ 631: {region: 0x166, script: 0x5b, flags: 0x0},
+ 632: {region: 0x166, script: 0x5b, flags: 0x0},
+ 633: {region: 0x107, script: 0x20, flags: 0x0},
+ 634: {region: 0x166, script: 0x5b, flags: 0x0},
+ 635: {region: 0x166, script: 0x5b, flags: 0x0},
+ 636: {region: 0x166, script: 0x5b, flags: 0x0},
+ 637: {region: 0x107, script: 0x20, flags: 0x0},
+ 638: {region: 0x166, script: 0x5b, flags: 0x0},
+ 639: {region: 0x96, script: 0x5b, flags: 0x0},
+ 640: {region: 0xe9, script: 0x5, flags: 0x0},
+ 641: {region: 0x7c, script: 0x5b, flags: 0x0},
+ 642: {region: 0x166, script: 0x5b, flags: 0x0},
+ 643: {region: 0x166, script: 0x5b, flags: 0x0},
+ 644: {region: 0x166, script: 0x5b, flags: 0x0},
+ 645: {region: 0x166, script: 0x2c, flags: 0x0},
+ 646: {region: 0x124, script: 0xee, flags: 0x0},
+ 647: {region: 0xe9, script: 0x5, flags: 0x0},
+ 648: {region: 0x166, script: 0x5b, flags: 0x0},
+ 649: {region: 0x166, script: 0x5b, flags: 0x0},
+ 650: {region: 0x1c, script: 0x5, flags: 0x1},
+ 651: {region: 0x166, script: 0x5b, flags: 0x0},
+ 652: {region: 0x166, script: 0x5b, flags: 0x0},
+ 653: {region: 0x166, script: 0x5b, flags: 0x0},
+ 654: {region: 0x139, script: 0x5b, flags: 0x0},
+ 655: {region: 0x88, script: 0x5f, flags: 0x0},
+ 656: {region: 0x98, script: 0x3e, flags: 0x0},
+ 657: {region: 0x130, script: 0x5b, flags: 0x0},
+ 658: {region: 0xe9, script: 0x5, flags: 0x0},
+ 659: {region: 0x132, script: 0x5b, flags: 0x0},
+ 660: {region: 0x166, script: 0x5b, flags: 0x0},
+ 661: {region: 0xb8, script: 0x5b, flags: 0x0},
+ 662: {region: 0x107, script: 0x20, flags: 0x0},
+ 663: {region: 0x166, script: 0x5b, flags: 0x0},
+ 664: {region: 0x96, script: 0x5b, flags: 0x0},
+ 665: {region: 0x166, script: 0x5b, flags: 0x0},
+ 666: {region: 0x53, script: 0xee, flags: 0x0},
+ 667: {region: 0x166, script: 0x5b, flags: 0x0},
+ 668: {region: 0x166, script: 0x5b, flags: 0x0},
+ 669: {region: 0x166, script: 0x5b, flags: 0x0},
+ 670: {region: 0x166, script: 0x5b, flags: 0x0},
+ 671: {region: 0x9a, script: 0x5d, flags: 0x0},
+ 672: {region: 0x166, script: 0x5b, flags: 0x0},
+ 673: {region: 0x166, script: 0x5b, flags: 0x0},
+ 674: {region: 0x107, script: 0x20, flags: 0x0},
+ 675: {region: 0x132, script: 0x5b, flags: 0x0},
+ 676: {region: 0x166, script: 0x5b, flags: 0x0},
+ 677: {region: 0xda, script: 0x5b, flags: 0x0},
+ 678: {region: 0x166, script: 0x5b, flags: 0x0},
+ 679: {region: 0x166, script: 0x5b, flags: 0x0},
+ 680: {region: 0x21, script: 0x2, flags: 0x1},
+ 681: {region: 0x166, script: 0x5b, flags: 0x0},
+ 682: {region: 0x166, script: 0x5b, flags: 0x0},
+ 683: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 684: {region: 0x53, script: 0x61, flags: 0x0},
+ 685: {region: 0x96, script: 0x5b, flags: 0x0},
+ 686: {region: 0x9d, script: 0x5, flags: 0x0},
+ 687: {region: 0x136, script: 0x5b, flags: 0x0},
+ 688: {region: 0x166, script: 0x5b, flags: 0x0},
+ 689: {region: 0x166, script: 0x5b, flags: 0x0},
+ 690: {region: 0x9a, script: 0xe9, flags: 0x0},
+ 691: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 692: {region: 0x166, script: 0x5b, flags: 0x0},
+ 693: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 694: {region: 0x166, script: 0x5b, flags: 0x0},
+ 695: {region: 0x166, script: 0x5b, flags: 0x0},
+ 696: {region: 0xb0, script: 0x58, flags: 0x0},
+ 697: {region: 0x166, script: 0x5b, flags: 0x0},
+ 698: {region: 0x166, script: 0x5b, flags: 0x0},
+ 699: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 700: {region: 0x166, script: 0x5b, flags: 0x0},
+ 701: {region: 0x166, script: 0x5b, flags: 0x0},
+ 702: {region: 0x163, script: 0x5b, flags: 0x0},
+ 703: {region: 0x9d, script: 0x5, flags: 0x0},
+ 704: {region: 0xb7, script: 0x5b, flags: 0x0},
+ 705: {region: 0xb9, script: 0x5b, flags: 0x0},
+ 706: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 707: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 708: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 709: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 710: {region: 0x9d, script: 0x5, flags: 0x0},
+ 711: {region: 0xb9, script: 0x5b, flags: 0x0},
+ 712: {region: 0x124, script: 0xee, flags: 0x0},
+ 713: {region: 0x53, script: 0x3b, flags: 0x0},
+ 714: {region: 0x12c, script: 0x5b, flags: 0x0},
+ 715: {region: 0x96, script: 0x5b, flags: 0x0},
+ 716: {region: 0x52, script: 0x5b, flags: 0x0},
+ 717: {region: 0x9a, script: 0x22, flags: 0x0},
+ 718: {region: 0x9a, script: 0x22, flags: 0x0},
+ 719: {region: 0x96, script: 0x5b, flags: 0x0},
+ 720: {region: 0x23, script: 0x3, flags: 0x1},
+ 721: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 722: {region: 0x166, script: 0x5b, flags: 0x0},
+ 723: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 724: {region: 0x166, script: 0x5b, flags: 0x0},
+ 725: {region: 0x166, script: 0x5b, flags: 0x0},
+ 726: {region: 0x166, script: 0x5b, flags: 0x0},
+ 727: {region: 0x166, script: 0x5b, flags: 0x0},
+ 728: {region: 0x166, script: 0x5b, flags: 0x0},
+ 729: {region: 0x166, script: 0x5b, flags: 0x0},
+ 730: {region: 0x166, script: 0x5b, flags: 0x0},
+ 731: {region: 0x166, script: 0x5b, flags: 0x0},
+ 732: {region: 0x166, script: 0x5b, flags: 0x0},
+ 733: {region: 0x166, script: 0x5b, flags: 0x0},
+ 734: {region: 0x166, script: 0x5b, flags: 0x0},
+ 735: {region: 0x166, script: 0x5, flags: 0x0},
+ 736: {region: 0x107, script: 0x20, flags: 0x0},
+ 737: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 738: {region: 0x166, script: 0x5b, flags: 0x0},
+ 739: {region: 0x96, script: 0x5b, flags: 0x0},
+ 740: {region: 0x166, script: 0x2c, flags: 0x0},
+ 741: {region: 0x166, script: 0x5b, flags: 0x0},
+ 742: {region: 0x166, script: 0x5b, flags: 0x0},
+ 743: {region: 0x166, script: 0x5b, flags: 0x0},
+ 744: {region: 0x113, script: 0x5b, flags: 0x0},
+ 745: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 746: {region: 0x166, script: 0x5b, flags: 0x0},
+ 747: {region: 0x166, script: 0x5b, flags: 0x0},
+ 748: {region: 0x124, script: 0x5, flags: 0x0},
+ 749: {region: 0xcd, script: 0x5b, flags: 0x0},
+ 750: {region: 0x166, script: 0x5b, flags: 0x0},
+ 751: {region: 0x166, script: 0x5b, flags: 0x0},
+ 752: {region: 0x166, script: 0x5b, flags: 0x0},
+ 753: {region: 0xc0, script: 0x5b, flags: 0x0},
+ 754: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 755: {region: 0x166, script: 0x5b, flags: 0x0},
+ 756: {region: 0x52, script: 0x5b, flags: 0x0},
+ 757: {region: 0xdc, script: 0x22, flags: 0x0},
+ 758: {region: 0x130, script: 0x5b, flags: 0x0},
+ 759: {region: 0xc1, script: 0x5b, flags: 0x0},
+ 760: {region: 0x166, script: 0x5b, flags: 0x0},
+ 761: {region: 0x166, script: 0x5b, flags: 0x0},
+ 762: {region: 0xe1, script: 0x5b, flags: 0x0},
+ 763: {region: 0x166, script: 0x5b, flags: 0x0},
+ 764: {region: 0x96, script: 0x5b, flags: 0x0},
+ 765: {region: 0x9c, script: 0x3d, flags: 0x0},
+ 766: {region: 0x166, script: 0x5b, flags: 0x0},
+ 767: {region: 0xc3, script: 0x20, flags: 0x0},
+ 768: {region: 0x166, script: 0x5, flags: 0x0},
+ 769: {region: 0x166, script: 0x5b, flags: 0x0},
+ 770: {region: 0x166, script: 0x5b, flags: 0x0},
+ 771: {region: 0x166, script: 0x5b, flags: 0x0},
+ 772: {region: 0x9a, script: 0x6f, flags: 0x0},
+ 773: {region: 0x166, script: 0x5b, flags: 0x0},
+ 774: {region: 0x166, script: 0x5b, flags: 0x0},
+ 775: {region: 0x10c, script: 0x5b, flags: 0x0},
+ 776: {region: 0x166, script: 0x5b, flags: 0x0},
+ 777: {region: 0x166, script: 0x5b, flags: 0x0},
+ 778: {region: 0x166, script: 0x5b, flags: 0x0},
+ 779: {region: 0x26, script: 0x3, flags: 0x1},
+ 780: {region: 0x166, script: 0x5b, flags: 0x0},
+ 781: {region: 0x166, script: 0x5b, flags: 0x0},
+ 782: {region: 0x9a, script: 0xe, flags: 0x0},
+ 783: {region: 0xc5, script: 0x76, flags: 0x0},
+ 785: {region: 0x166, script: 0x5b, flags: 0x0},
+ 786: {region: 0x49, script: 0x5b, flags: 0x0},
+ 787: {region: 0x49, script: 0x5b, flags: 0x0},
+ 788: {region: 0x37, script: 0x5b, flags: 0x0},
+ 789: {region: 0x166, script: 0x5b, flags: 0x0},
+ 790: {region: 0x166, script: 0x5b, flags: 0x0},
+ 791: {region: 0x166, script: 0x5b, flags: 0x0},
+ 792: {region: 0x166, script: 0x5b, flags: 0x0},
+ 793: {region: 0x166, script: 0x5b, flags: 0x0},
+ 794: {region: 0x166, script: 0x5b, flags: 0x0},
+ 795: {region: 0x9a, script: 0x22, flags: 0x0},
+ 796: {region: 0xdc, script: 0x22, flags: 0x0},
+ 797: {region: 0x107, script: 0x20, flags: 0x0},
+ 798: {region: 0x35, script: 0x73, flags: 0x0},
+ 799: {region: 0x29, script: 0x3, flags: 0x1},
+ 800: {region: 0xcc, script: 0x5b, flags: 0x0},
+ 801: {region: 0x166, script: 0x5b, flags: 0x0},
+ 802: {region: 0x166, script: 0x5b, flags: 0x0},
+ 803: {region: 0x166, script: 0x5b, flags: 0x0},
+ 804: {region: 0x9a, script: 0x22, flags: 0x0},
+ 805: {region: 0x52, script: 0x5b, flags: 0x0},
+ 807: {region: 0x166, script: 0x5b, flags: 0x0},
+ 808: {region: 0x136, script: 0x5b, flags: 0x0},
+ 809: {region: 0x166, script: 0x5b, flags: 0x0},
+ 810: {region: 0x166, script: 0x5b, flags: 0x0},
+ 811: {region: 0xe9, script: 0x5, flags: 0x0},
+ 812: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 813: {region: 0x9a, script: 0x22, flags: 0x0},
+ 814: {region: 0x96, script: 0x5b, flags: 0x0},
+ 815: {region: 0x165, script: 0x5b, flags: 0x0},
+ 816: {region: 0x166, script: 0x5b, flags: 0x0},
+ 817: {region: 0xc5, script: 0x76, flags: 0x0},
+ 818: {region: 0x166, script: 0x5b, flags: 0x0},
+ 819: {region: 0x166, script: 0x2c, flags: 0x0},
+ 820: {region: 0x107, script: 0x20, flags: 0x0},
+ 821: {region: 0x166, script: 0x5b, flags: 0x0},
+ 822: {region: 0x132, script: 0x5b, flags: 0x0},
+ 823: {region: 0x9d, script: 0x67, flags: 0x0},
+ 824: {region: 0x166, script: 0x5b, flags: 0x0},
+ 825: {region: 0x166, script: 0x5b, flags: 0x0},
+ 826: {region: 0x9d, script: 0x5, flags: 0x0},
+ 827: {region: 0x166, script: 0x5b, flags: 0x0},
+ 828: {region: 0x166, script: 0x5b, flags: 0x0},
+ 829: {region: 0x166, script: 0x5b, flags: 0x0},
+ 830: {region: 0xde, script: 0x5b, flags: 0x0},
+ 831: {region: 0x166, script: 0x5b, flags: 0x0},
+ 832: {region: 0x166, script: 0x5b, flags: 0x0},
+ 834: {region: 0x166, script: 0x5b, flags: 0x0},
+ 835: {region: 0x53, script: 0x3b, flags: 0x0},
+ 836: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 837: {region: 0xd3, script: 0x5b, flags: 0x0},
+ 838: {region: 0x166, script: 0x5b, flags: 0x0},
+ 839: {region: 0xdb, script: 0x5b, flags: 0x0},
+ 840: {region: 0x166, script: 0x5b, flags: 0x0},
+ 841: {region: 0x166, script: 0x5b, flags: 0x0},
+ 842: {region: 0x166, script: 0x5b, flags: 0x0},
+ 843: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 844: {region: 0x166, script: 0x5b, flags: 0x0},
+ 845: {region: 0x166, script: 0x5b, flags: 0x0},
+ 846: {region: 0x165, script: 0x5b, flags: 0x0},
+ 847: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 848: {region: 0x61, script: 0x5b, flags: 0x0},
+ 849: {region: 0xdc, script: 0x22, flags: 0x0},
+ 850: {region: 0x166, script: 0x5b, flags: 0x0},
+ 851: {region: 0xdc, script: 0x22, flags: 0x0},
+ 852: {region: 0x166, script: 0x5b, flags: 0x0},
+ 853: {region: 0x166, script: 0x5b, flags: 0x0},
+ 854: {region: 0xd3, script: 0x5b, flags: 0x0},
+ 855: {region: 0x166, script: 0x5b, flags: 0x0},
+ 856: {region: 0x166, script: 0x5b, flags: 0x0},
+ 857: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 858: {region: 0x166, script: 0x5b, flags: 0x0},
+ 859: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 860: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 861: {region: 0x166, script: 0x5b, flags: 0x0},
+ 862: {region: 0x166, script: 0x5b, flags: 0x0},
+ 863: {region: 0x96, script: 0x5b, flags: 0x0},
+ 864: {region: 0x166, script: 0x5b, flags: 0x0},
+ 865: {region: 0xe0, script: 0x5b, flags: 0x0},
+ 866: {region: 0x166, script: 0x5b, flags: 0x0},
+ 867: {region: 0x166, script: 0x5b, flags: 0x0},
+ 868: {region: 0x9a, script: 0x5b, flags: 0x0},
+ 869: {region: 0x166, script: 0x5b, flags: 0x0},
+ 870: {region: 0x166, script: 0x5b, flags: 0x0},
+ 871: {region: 0xda, script: 0x5b, flags: 0x0},
+ 872: {region: 0x52, script: 0x5b, flags: 0x0},
+ 873: {region: 0x166, script: 0x5b, flags: 0x0},
+ 874: {region: 0xdb, script: 0x5b, flags: 0x0},
+ 875: {region: 0x166, script: 0x5b, flags: 0x0},
+ 876: {region: 0x52, script: 0x5b, flags: 0x0},
+ 877: {region: 0x166, script: 0x5b, flags: 0x0},
+ 878: {region: 0x166, script: 0x5b, flags: 0x0},
+ 879: {region: 0xdb, script: 0x5b, flags: 0x0},
+ 880: {region: 0x124, script: 0x57, flags: 0x0},
+ 881: {region: 0x9a, script: 0x22, flags: 0x0},
+ 882: {region: 0x10d, script: 0xcb, flags: 0x0},
+ 883: {region: 0x166, script: 0x5b, flags: 0x0},
+ 884: {region: 0x166, script: 0x5b, flags: 0x0},
+ 885: {region: 0x85, script: 0x7e, flags: 0x0},
+ 886: {region: 0x162, script: 0x5b, flags: 0x0},
+ 887: {region: 0x166, script: 0x5b, flags: 0x0},
+ 888: {region: 0x49, script: 0x17, flags: 0x0},
+ 889: {region: 0x166, script: 0x5b, flags: 0x0},
+ 890: {region: 0x162, script: 0x5b, flags: 0x0},
+ 891: {region: 0x166, script: 0x5b, flags: 0x0},
+ 892: {region: 0x166, script: 0x5b, flags: 0x0},
+ 893: {region: 0x166, script: 0x5b, flags: 0x0},
+ 894: {region: 0x166, script: 0x5b, flags: 0x0},
+ 895: {region: 0x166, script: 0x5b, flags: 0x0},
+ 896: {region: 0x118, script: 0x5b, flags: 0x0},
+ 897: {region: 0x166, script: 0x5b, flags: 0x0},
+ 898: {region: 0x166, script: 0x5b, flags: 0x0},
+ 899: {region: 0x136, script: 0x5b, flags: 0x0},
+ 900: {region: 0x166, script: 0x5b, flags: 0x0},
+ 901: {region: 0x53, script: 0x5b, flags: 0x0},
+ 902: {region: 0x166, script: 0x5b, flags: 0x0},
+ 903: {region: 0xcf, script: 0x5b, flags: 0x0},
+ 904: {region: 0x130, script: 0x5b, flags: 0x0},
+ 905: {region: 0x132, script: 0x5b, flags: 0x0},
+ 906: {region: 0x81, script: 0x5b, flags: 0x0},
+ 907: {region: 0x79, script: 0x5b, flags: 0x0},
+ 908: {region: 0x166, script: 0x5b, flags: 0x0},
+ 910: {region: 0x166, script: 0x5b, flags: 0x0},
+ 911: {region: 0x166, script: 0x5b, flags: 0x0},
+ 912: {region: 0x70, script: 0x5b, flags: 0x0},
+ 913: {region: 0x166, script: 0x5b, flags: 0x0},
+ 914: {region: 0x166, script: 0x5b, flags: 0x0},
+ 915: {region: 0x166, script: 0x5b, flags: 0x0},
+ 916: {region: 0x166, script: 0x5b, flags: 0x0},
+ 917: {region: 0x9a, script: 0x83, flags: 0x0},
+ 918: {region: 0x166, script: 0x5b, flags: 0x0},
+ 919: {region: 0x166, script: 0x5, flags: 0x0},
+ 920: {region: 0x7e, script: 0x20, flags: 0x0},
+ 921: {region: 0x136, script: 0x84, flags: 0x0},
+ 922: {region: 0x166, script: 0x5, flags: 0x0},
+ 923: {region: 0xc6, script: 0x82, flags: 0x0},
+ 924: {region: 0x166, script: 0x5b, flags: 0x0},
+ 925: {region: 0x2c, script: 0x3, flags: 0x1},
+ 926: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 927: {region: 0x2f, script: 0x2, flags: 0x1},
+ 928: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 929: {region: 0x30, script: 0x5b, flags: 0x0},
+ 930: {region: 0xf1, script: 0x5b, flags: 0x0},
+ 931: {region: 0x166, script: 0x5b, flags: 0x0},
+ 932: {region: 0x79, script: 0x5b, flags: 0x0},
+ 933: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 934: {region: 0x136, script: 0x5b, flags: 0x0},
+ 935: {region: 0x49, script: 0x5b, flags: 0x0},
+ 936: {region: 0x166, script: 0x5b, flags: 0x0},
+ 937: {region: 0x9d, script: 0xfa, flags: 0x0},
+ 938: {region: 0x166, script: 0x5b, flags: 0x0},
+ 939: {region: 0x61, script: 0x5b, flags: 0x0},
+ 940: {region: 0x166, script: 0x5, flags: 0x0},
+ 941: {region: 0xb1, script: 0x90, flags: 0x0},
+ 943: {region: 0x166, script: 0x5b, flags: 0x0},
+ 944: {region: 0x166, script: 0x5b, flags: 0x0},
+ 945: {region: 0x9a, script: 0x12, flags: 0x0},
+ 946: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 947: {region: 0xea, script: 0x5b, flags: 0x0},
+ 948: {region: 0x166, script: 0x5b, flags: 0x0},
+ 949: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 950: {region: 0x166, script: 0x5b, flags: 0x0},
+ 951: {region: 0x166, script: 0x5b, flags: 0x0},
+ 952: {region: 0x88, script: 0x34, flags: 0x0},
+ 953: {region: 0x76, script: 0x5b, flags: 0x0},
+ 954: {region: 0x166, script: 0x5b, flags: 0x0},
+ 955: {region: 0xe9, script: 0x4e, flags: 0x0},
+ 956: {region: 0x9d, script: 0x5, flags: 0x0},
+ 957: {region: 0x1, script: 0x5b, flags: 0x0},
+ 958: {region: 0x24, script: 0x5, flags: 0x0},
+ 959: {region: 0x166, script: 0x5b, flags: 0x0},
+ 960: {region: 0x41, script: 0x5b, flags: 0x0},
+ 961: {region: 0x166, script: 0x5b, flags: 0x0},
+ 962: {region: 0x7b, script: 0x5b, flags: 0x0},
+ 963: {region: 0x166, script: 0x5b, flags: 0x0},
+ 964: {region: 0xe5, script: 0x5b, flags: 0x0},
+ 965: {region: 0x8a, script: 0x5b, flags: 0x0},
+ 966: {region: 0x6a, script: 0x5b, flags: 0x0},
+ 967: {region: 0x166, script: 0x5b, flags: 0x0},
+ 968: {region: 0x9a, script: 0x22, flags: 0x0},
+ 969: {region: 0x166, script: 0x5b, flags: 0x0},
+ 970: {region: 0x103, script: 0x5b, flags: 0x0},
+ 971: {region: 0x96, script: 0x5b, flags: 0x0},
+ 972: {region: 0x166, script: 0x5b, flags: 0x0},
+ 973: {region: 0x166, script: 0x5b, flags: 0x0},
+ 974: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 975: {region: 0x166, script: 0x5, flags: 0x0},
+ 976: {region: 0x9a, script: 0x5b, flags: 0x0},
+ 977: {region: 0x31, script: 0x2, flags: 0x1},
+ 978: {region: 0xdc, script: 0x22, flags: 0x0},
+ 979: {region: 0x35, script: 0xe, flags: 0x0},
+ 980: {region: 0x4e, script: 0x5b, flags: 0x0},
+ 981: {region: 0x73, script: 0x5b, flags: 0x0},
+ 982: {region: 0x4e, script: 0x5b, flags: 0x0},
+ 983: {region: 0x9d, script: 0x5, flags: 0x0},
+ 984: {region: 0x10d, script: 0x5b, flags: 0x0},
+ 985: {region: 0x3a, script: 0x5b, flags: 0x0},
+ 986: {region: 0x166, script: 0x5b, flags: 0x0},
+ 987: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 988: {region: 0x105, script: 0x5b, flags: 0x0},
+ 989: {region: 0x96, script: 0x5b, flags: 0x0},
+ 990: {region: 0x130, script: 0x5b, flags: 0x0},
+ 991: {region: 0x166, script: 0x5b, flags: 0x0},
+ 992: {region: 0x166, script: 0x5b, flags: 0x0},
+ 993: {region: 0x74, script: 0x5b, flags: 0x0},
+ 994: {region: 0x107, script: 0x20, flags: 0x0},
+ 995: {region: 0x131, script: 0x20, flags: 0x0},
+ 996: {region: 0x10a, script: 0x5b, flags: 0x0},
+ 997: {region: 0x108, script: 0x5b, flags: 0x0},
+ 998: {region: 0x130, script: 0x5b, flags: 0x0},
+ 999: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1000: {region: 0xa3, script: 0x4c, flags: 0x0},
+ 1001: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1002: {region: 0x81, script: 0x5b, flags: 0x0},
+ 1003: {region: 0x107, script: 0x20, flags: 0x0},
+ 1004: {region: 0xa5, script: 0x5b, flags: 0x0},
+ 1005: {region: 0x96, script: 0x5b, flags: 0x0},
+ 1006: {region: 0x9a, script: 0x5b, flags: 0x0},
+ 1007: {region: 0x115, script: 0x5b, flags: 0x0},
+ 1008: {region: 0x9a, script: 0xcf, flags: 0x0},
+ 1009: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1010: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1011: {region: 0x130, script: 0x5b, flags: 0x0},
+ 1012: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 1013: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1014: {region: 0x166, script: 0x5, flags: 0x0},
+ 1015: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 1016: {region: 0x7c, script: 0x5b, flags: 0x0},
+ 1017: {region: 0x49, script: 0x5b, flags: 0x0},
+ 1018: {region: 0x33, script: 0x4, flags: 0x1},
+ 1019: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 1020: {region: 0x9d, script: 0x5, flags: 0x0},
+ 1021: {region: 0xdb, script: 0x5b, flags: 0x0},
+ 1022: {region: 0x4f, script: 0x5b, flags: 0x0},
+ 1023: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 1024: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 1025: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 1026: {region: 0x4c, script: 0x5b, flags: 0x0},
+ 1027: {region: 0x97, script: 0x80, flags: 0x0},
+ 1028: {region: 0xb7, script: 0x5b, flags: 0x0},
+ 1029: {region: 0x166, script: 0x2c, flags: 0x0},
+ 1030: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1032: {region: 0xbb, script: 0xeb, flags: 0x0},
+ 1033: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1034: {region: 0xc5, script: 0x76, flags: 0x0},
+ 1035: {region: 0x166, script: 0x5, flags: 0x0},
+ 1036: {region: 0xb4, script: 0xd6, flags: 0x0},
+ 1037: {region: 0x70, script: 0x5b, flags: 0x0},
+ 1038: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1039: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1040: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1041: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1042: {region: 0x112, script: 0x5b, flags: 0x0},
+ 1043: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1044: {region: 0xe9, script: 0x5, flags: 0x0},
+ 1045: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1046: {region: 0x110, script: 0x5b, flags: 0x0},
+ 1047: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1048: {region: 0xea, script: 0x5b, flags: 0x0},
+ 1049: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1050: {region: 0x96, script: 0x5b, flags: 0x0},
+ 1051: {region: 0x143, script: 0x5b, flags: 0x0},
+ 1052: {region: 0x10d, script: 0x5b, flags: 0x0},
+ 1054: {region: 0x10d, script: 0x5b, flags: 0x0},
+ 1055: {region: 0x73, script: 0x5b, flags: 0x0},
+ 1056: {region: 0x98, script: 0xcc, flags: 0x0},
+ 1057: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1058: {region: 0x73, script: 0x5b, flags: 0x0},
+ 1059: {region: 0x165, script: 0x5b, flags: 0x0},
+ 1060: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1061: {region: 0xc4, script: 0x5b, flags: 0x0},
+ 1062: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1063: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1064: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1065: {region: 0x116, script: 0x5b, flags: 0x0},
+ 1066: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1067: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1068: {region: 0x124, script: 0xee, flags: 0x0},
+ 1069: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1070: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1071: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1072: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1073: {region: 0x27, script: 0x5b, flags: 0x0},
+ 1074: {region: 0x37, script: 0x5, flags: 0x1},
+ 1075: {region: 0x9a, script: 0xd9, flags: 0x0},
+ 1076: {region: 0x117, script: 0x5b, flags: 0x0},
+ 1077: {region: 0x115, script: 0x5b, flags: 0x0},
+ 1078: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1079: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1080: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1081: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1082: {region: 0x6e, script: 0x5b, flags: 0x0},
+ 1083: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1084: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1085: {region: 0x61, script: 0x5b, flags: 0x0},
+ 1086: {region: 0x96, script: 0x5b, flags: 0x0},
+ 1087: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1088: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1089: {region: 0x130, script: 0x5b, flags: 0x0},
+ 1090: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1091: {region: 0x85, script: 0x5b, flags: 0x0},
+ 1092: {region: 0x10d, script: 0x5b, flags: 0x0},
+ 1093: {region: 0x130, script: 0x5b, flags: 0x0},
+ 1094: {region: 0x160, script: 0x5, flags: 0x0},
+ 1095: {region: 0x4b, script: 0x5b, flags: 0x0},
+ 1096: {region: 0x61, script: 0x5b, flags: 0x0},
+ 1097: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1098: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1099: {region: 0x96, script: 0x5b, flags: 0x0},
+ 1100: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1101: {region: 0x35, script: 0xe, flags: 0x0},
+ 1102: {region: 0x9c, script: 0xde, flags: 0x0},
+ 1103: {region: 0xea, script: 0x5b, flags: 0x0},
+ 1104: {region: 0x9a, script: 0xe6, flags: 0x0},
+ 1105: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1106: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1107: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1108: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1109: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1110: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1111: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1112: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1113: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1114: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 1115: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1116: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1117: {region: 0x9a, script: 0x53, flags: 0x0},
+ 1118: {region: 0x53, script: 0xe4, flags: 0x0},
+ 1119: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1120: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1121: {region: 0x9a, script: 0xe9, flags: 0x0},
+ 1122: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1123: {region: 0x113, script: 0x5b, flags: 0x0},
+ 1124: {region: 0x132, script: 0x5b, flags: 0x0},
+ 1125: {region: 0x127, script: 0x5b, flags: 0x0},
+ 1126: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1127: {region: 0x3c, script: 0x3, flags: 0x1},
+ 1128: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1129: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1130: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1131: {region: 0x124, script: 0xee, flags: 0x0},
+ 1132: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1133: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1134: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1135: {region: 0x70, script: 0x2c, flags: 0x0},
+ 1136: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1137: {region: 0x6e, script: 0x2c, flags: 0x0},
+ 1138: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1139: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1140: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1141: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 1142: {region: 0x128, script: 0x5b, flags: 0x0},
+ 1143: {region: 0x126, script: 0x5b, flags: 0x0},
+ 1144: {region: 0x32, script: 0x5b, flags: 0x0},
+ 1145: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1146: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 1147: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1148: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1149: {region: 0x32, script: 0x5b, flags: 0x0},
+ 1150: {region: 0xd5, script: 0x5b, flags: 0x0},
+ 1151: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1152: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1153: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1154: {region: 0x12a, script: 0x5b, flags: 0x0},
+ 1155: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1156: {region: 0xcf, script: 0x5b, flags: 0x0},
+ 1157: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1158: {region: 0xe7, script: 0x5b, flags: 0x0},
+ 1159: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1160: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1161: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1162: {region: 0x12c, script: 0x5b, flags: 0x0},
+ 1163: {region: 0x12c, script: 0x5b, flags: 0x0},
+ 1164: {region: 0x12f, script: 0x5b, flags: 0x0},
+ 1165: {region: 0x166, script: 0x5, flags: 0x0},
+ 1166: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1167: {region: 0x88, script: 0x34, flags: 0x0},
+ 1168: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1169: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 1170: {region: 0x43, script: 0xef, flags: 0x0},
+ 1171: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1172: {region: 0x107, script: 0x20, flags: 0x0},
+ 1173: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1174: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1175: {region: 0x132, script: 0x5b, flags: 0x0},
+ 1176: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1177: {region: 0x124, script: 0xee, flags: 0x0},
+ 1178: {region: 0x32, script: 0x5b, flags: 0x0},
+ 1179: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1180: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1181: {region: 0xcf, script: 0x5b, flags: 0x0},
+ 1182: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1183: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1184: {region: 0x12e, script: 0x5b, flags: 0x0},
+ 1185: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1187: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1188: {region: 0xd5, script: 0x5b, flags: 0x0},
+ 1189: {region: 0x53, script: 0xe7, flags: 0x0},
+ 1190: {region: 0xe6, script: 0x5b, flags: 0x0},
+ 1191: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1192: {region: 0x107, script: 0x20, flags: 0x0},
+ 1193: {region: 0xbb, script: 0x5b, flags: 0x0},
+ 1194: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1195: {region: 0x107, script: 0x20, flags: 0x0},
+ 1196: {region: 0x3f, script: 0x4, flags: 0x1},
+ 1197: {region: 0x11d, script: 0xf3, flags: 0x0},
+ 1198: {region: 0x131, script: 0x20, flags: 0x0},
+ 1199: {region: 0x76, script: 0x5b, flags: 0x0},
+ 1200: {region: 0x2a, script: 0x5b, flags: 0x0},
+ 1202: {region: 0x43, script: 0x3, flags: 0x1},
+ 1203: {region: 0x9a, script: 0xe, flags: 0x0},
+ 1204: {region: 0xe9, script: 0x5, flags: 0x0},
+ 1205: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1206: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1207: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1208: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1209: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1210: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1211: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1212: {region: 0x46, script: 0x4, flags: 0x1},
+ 1213: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1214: {region: 0xb5, script: 0xf4, flags: 0x0},
+ 1215: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1216: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1217: {region: 0x9f, script: 0x5b, flags: 0x0},
+ 1218: {region: 0x107, script: 0x5b, flags: 0x0},
+ 1219: {region: 0x13f, script: 0x5b, flags: 0x0},
+ 1220: {region: 0x11c, script: 0x5b, flags: 0x0},
+ 1221: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1222: {region: 0x36, script: 0x5b, flags: 0x0},
+ 1223: {region: 0x61, script: 0x5b, flags: 0x0},
+ 1224: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 1225: {region: 0x1, script: 0x5b, flags: 0x0},
+ 1226: {region: 0x107, script: 0x5b, flags: 0x0},
+ 1227: {region: 0x6b, script: 0x5b, flags: 0x0},
+ 1228: {region: 0x130, script: 0x5b, flags: 0x0},
+ 1229: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1230: {region: 0x36, script: 0x5b, flags: 0x0},
+ 1231: {region: 0x4e, script: 0x5b, flags: 0x0},
+ 1232: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1233: {region: 0x70, script: 0x2c, flags: 0x0},
+ 1234: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1235: {region: 0xe8, script: 0x5b, flags: 0x0},
+ 1236: {region: 0x2f, script: 0x5b, flags: 0x0},
+ 1237: {region: 0x9a, script: 0xe9, flags: 0x0},
+ 1238: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1239: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1240: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1241: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1242: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1243: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1244: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1245: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1246: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1247: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1248: {region: 0x141, script: 0x5b, flags: 0x0},
+ 1249: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1250: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1251: {region: 0xa9, script: 0x5, flags: 0x0},
+ 1252: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1253: {region: 0x115, script: 0x5b, flags: 0x0},
+ 1254: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1255: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1256: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1257: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1258: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1259: {region: 0x53, script: 0x3b, flags: 0x0},
+ 1260: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1261: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1262: {region: 0x41, script: 0x5b, flags: 0x0},
+ 1263: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1264: {region: 0x12c, script: 0x18, flags: 0x0},
+ 1265: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1266: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1267: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1268: {region: 0x12c, script: 0x63, flags: 0x0},
+ 1269: {region: 0x12c, script: 0x64, flags: 0x0},
+ 1270: {region: 0x7e, script: 0x2e, flags: 0x0},
+ 1271: {region: 0x53, script: 0x68, flags: 0x0},
+ 1272: {region: 0x10c, script: 0x6d, flags: 0x0},
+ 1273: {region: 0x109, script: 0x79, flags: 0x0},
+ 1274: {region: 0x9a, script: 0x22, flags: 0x0},
+ 1275: {region: 0x132, script: 0x5b, flags: 0x0},
+ 1276: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1277: {region: 0x9d, script: 0x93, flags: 0x0},
+ 1278: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1279: {region: 0x15f, script: 0xce, flags: 0x0},
+ 1280: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1281: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1282: {region: 0xdc, script: 0x22, flags: 0x0},
+ 1283: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1284: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1285: {region: 0xd2, script: 0x5b, flags: 0x0},
+ 1286: {region: 0x76, script: 0x5b, flags: 0x0},
+ 1287: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1288: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1289: {region: 0x52, script: 0x5b, flags: 0x0},
+ 1290: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1291: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1292: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1293: {region: 0x52, script: 0x5b, flags: 0x0},
+ 1294: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1295: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1296: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1297: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1298: {region: 0x1, script: 0x3e, flags: 0x0},
+ 1299: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1300: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1301: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1302: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1303: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1304: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 1305: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1306: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1307: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1308: {region: 0x41, script: 0x5b, flags: 0x0},
+ 1309: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1310: {region: 0xd0, script: 0x5b, flags: 0x0},
+ 1311: {region: 0x4a, script: 0x3, flags: 0x1},
+ 1312: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1313: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1314: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1315: {region: 0x53, script: 0x5b, flags: 0x0},
+ 1316: {region: 0x10c, script: 0x5b, flags: 0x0},
+ 1318: {region: 0xa9, script: 0x5, flags: 0x0},
+ 1319: {region: 0xda, script: 0x5b, flags: 0x0},
+ 1320: {region: 0xbb, script: 0xeb, flags: 0x0},
+ 1321: {region: 0x4d, script: 0x14, flags: 0x1},
+ 1322: {region: 0x53, script: 0x7f, flags: 0x0},
+ 1323: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1324: {region: 0x123, script: 0x5b, flags: 0x0},
+ 1325: {region: 0xd1, script: 0x5b, flags: 0x0},
+ 1326: {region: 0x166, script: 0x5b, flags: 0x0},
+ 1327: {region: 0x162, script: 0x5b, flags: 0x0},
+ 1329: {region: 0x12c, script: 0x5b, flags: 0x0},
+}
+
+// likelyLangList holds the multi-entry lists referenced by likelyLang.
+// Size: 582 bytes, 97 elements
+var likelyLangList = [97]likelyScriptRegion{
+ 0: {region: 0x9d, script: 0x7, flags: 0x0},
+ 1: {region: 0xa2, script: 0x7a, flags: 0x2},
+ 2: {region: 0x11d, script: 0x87, flags: 0x2},
+ 3: {region: 0x32, script: 0x5b, flags: 0x0},
+ 4: {region: 0x9c, script: 0x5, flags: 0x4},
+ 5: {region: 0x9d, script: 0x5, flags: 0x4},
+ 6: {region: 0x107, script: 0x20, flags: 0x4},
+ 7: {region: 0x9d, script: 0x5, flags: 0x2},
+ 8: {region: 0x107, script: 0x20, flags: 0x0},
+ 9: {region: 0x38, script: 0x2f, flags: 0x2},
+ 10: {region: 0x136, script: 0x5b, flags: 0x0},
+ 11: {region: 0x7c, script: 0xd1, flags: 0x2},
+ 12: {region: 0x115, script: 0x5b, flags: 0x0},
+ 13: {region: 0x85, script: 0x1, flags: 0x2},
+ 14: {region: 0x5e, script: 0x1f, flags: 0x0},
+ 15: {region: 0x88, script: 0x60, flags: 0x2},
+ 16: {region: 0xd7, script: 0x5b, flags: 0x0},
+ 17: {region: 0x52, script: 0x5, flags: 0x4},
+ 18: {region: 0x10c, script: 0x5, flags: 0x4},
+ 19: {region: 0xaf, script: 0x20, flags: 0x0},
+ 20: {region: 0x24, script: 0x5, flags: 0x4},
+ 21: {region: 0x53, script: 0x5, flags: 0x4},
+ 22: {region: 0x9d, script: 0x5, flags: 0x4},
+ 23: {region: 0xc6, script: 0x5, flags: 0x4},
+ 24: {region: 0x53, script: 0x5, flags: 0x2},
+ 25: {region: 0x12c, script: 0x5b, flags: 0x0},
+ 26: {region: 0xb1, script: 0x5, flags: 0x4},
+ 27: {region: 0x9c, script: 0x5, flags: 0x2},
+ 28: {region: 0xa6, script: 0x20, flags: 0x0},
+ 29: {region: 0x53, script: 0x5, flags: 0x4},
+ 30: {region: 0x12c, script: 0x5b, flags: 0x4},
+ 31: {region: 0x53, script: 0x5, flags: 0x2},
+ 32: {region: 0x12c, script: 0x5b, flags: 0x2},
+ 33: {region: 0xdc, script: 0x22, flags: 0x0},
+ 34: {region: 0x9a, script: 0x5e, flags: 0x2},
+ 35: {region: 0x84, script: 0x5b, flags: 0x0},
+ 36: {region: 0x85, script: 0x7e, flags: 0x4},
+ 37: {region: 0x85, script: 0x7e, flags: 0x2},
+ 38: {region: 0xc6, script: 0x20, flags: 0x0},
+ 39: {region: 0x53, script: 0x71, flags: 0x4},
+ 40: {region: 0x53, script: 0x71, flags: 0x2},
+ 41: {region: 0xd1, script: 0x5b, flags: 0x0},
+ 42: {region: 0x4a, script: 0x5, flags: 0x4},
+ 43: {region: 0x96, script: 0x5, flags: 0x4},
+ 44: {region: 0x9a, script: 0x36, flags: 0x0},
+ 45: {region: 0xe9, script: 0x5, flags: 0x4},
+ 46: {region: 0xe9, script: 0x5, flags: 0x2},
+ 47: {region: 0x9d, script: 0x8d, flags: 0x0},
+ 48: {region: 0x53, script: 0x8e, flags: 0x2},
+ 49: {region: 0xbb, script: 0xeb, flags: 0x0},
+ 50: {region: 0xda, script: 0x5b, flags: 0x4},
+ 51: {region: 0xe9, script: 0x5, flags: 0x0},
+ 52: {region: 0x9a, script: 0x22, flags: 0x2},
+ 53: {region: 0x9a, script: 0x50, flags: 0x2},
+ 54: {region: 0x9a, script: 0xd5, flags: 0x2},
+ 55: {region: 0x106, script: 0x20, flags: 0x0},
+ 56: {region: 0xbe, script: 0x5b, flags: 0x4},
+ 57: {region: 0x105, script: 0x5b, flags: 0x4},
+ 58: {region: 0x107, script: 0x5b, flags: 0x4},
+ 59: {region: 0x12c, script: 0x5b, flags: 0x4},
+ 60: {region: 0x125, script: 0x20, flags: 0x0},
+ 61: {region: 0xe9, script: 0x5, flags: 0x4},
+ 62: {region: 0xe9, script: 0x5, flags: 0x2},
+ 63: {region: 0x53, script: 0x5, flags: 0x0},
+ 64: {region: 0xaf, script: 0x20, flags: 0x4},
+ 65: {region: 0xc6, script: 0x20, flags: 0x4},
+ 66: {region: 0xaf, script: 0x20, flags: 0x2},
+ 67: {region: 0x9a, script: 0xe, flags: 0x0},
+ 68: {region: 0xdc, script: 0x22, flags: 0x4},
+ 69: {region: 0xdc, script: 0x22, flags: 0x2},
+ 70: {region: 0x138, script: 0x5b, flags: 0x0},
+ 71: {region: 0x24, script: 0x5, flags: 0x4},
+ 72: {region: 0x53, script: 0x20, flags: 0x4},
+ 73: {region: 0x24, script: 0x5, flags: 0x2},
+ 74: {region: 0x8e, script: 0x3c, flags: 0x0},
+ 75: {region: 0x53, script: 0x3b, flags: 0x4},
+ 76: {region: 0x53, script: 0x3b, flags: 0x2},
+ 77: {region: 0x53, script: 0x3b, flags: 0x0},
+ 78: {region: 0x2f, script: 0x3c, flags: 0x4},
+ 79: {region: 0x3e, script: 0x3c, flags: 0x4},
+ 80: {region: 0x7c, script: 0x3c, flags: 0x4},
+ 81: {region: 0x7f, script: 0x3c, flags: 0x4},
+ 82: {region: 0x8e, script: 0x3c, flags: 0x4},
+ 83: {region: 0x96, script: 0x3c, flags: 0x4},
+ 84: {region: 0xc7, script: 0x3c, flags: 0x4},
+ 85: {region: 0xd1, script: 0x3c, flags: 0x4},
+ 86: {region: 0xe3, script: 0x3c, flags: 0x4},
+ 87: {region: 0xe6, script: 0x3c, flags: 0x4},
+ 88: {region: 0xe8, script: 0x3c, flags: 0x4},
+ 89: {region: 0x117, script: 0x3c, flags: 0x4},
+ 90: {region: 0x124, script: 0x3c, flags: 0x4},
+ 91: {region: 0x12f, script: 0x3c, flags: 0x4},
+ 92: {region: 0x136, script: 0x3c, flags: 0x4},
+ 93: {region: 0x13f, script: 0x3c, flags: 0x4},
+ 94: {region: 0x12f, script: 0x11, flags: 0x2},
+ 95: {region: 0x12f, script: 0x37, flags: 0x2},
+ 96: {region: 0x12f, script: 0x3c, flags: 0x2},
+}
+
+type likelyLangScript struct {
+ lang uint16
+ script uint16
+ flags uint8
+}
+
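+// regionCandidatesSketch is an editorial sketch, not part of the generated
+// tables: it illustrates the index/size encoding documented on likelyRegion
+// below, assuming the 0x1 flags bit is what marks a multi-entry ("list")
+// record. likelyLang and likelyLangList pair up the same way, with region as
+// the start index and script as the length. The function name and flag
+// constant are illustrative assumptions, not identifiers from this package.
+func regionCandidatesSketch(regionID uint16) []likelyLangScript {
+	const listFlag = 0x1 // assumed meaning of the 0x1 flags bit
+	e := likelyRegion[regionID]
+	if e.flags&listFlag == 0 {
+		// Single candidate: the entry itself is the answer.
+		return []likelyLangScript{e}
+	}
+	// List record: lang is the start index and script the length of a
+	// slice within likelyRegionList (declared later in this file).
+	return likelyRegionList[e.lang : e.lang+e.script]
+}
+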
+// likelyRegion is a lookup table, indexed by regionID, for the most likely
+// languages and scripts given incomplete information. If more than one entry
+// exists for a given regionID, lang and script hold, respectively, the index
+// and size of the corresponding list in likelyRegionList.
+// TODO: exclude containers and user-definable regions from the list.
+// Size: 2154 bytes, 359 elements
+var likelyRegion = [359]likelyLangScript{
+ 34: {lang: 0xd7, script: 0x5b, flags: 0x0},
+ 35: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 36: {lang: 0x0, script: 0x2, flags: 0x1},
+ 39: {lang: 0x2, script: 0x2, flags: 0x1},
+ 40: {lang: 0x4, script: 0x2, flags: 0x1},
+ 42: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 43: {lang: 0x0, script: 0x5b, flags: 0x0},
+ 44: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 45: {lang: 0x41b, script: 0x5b, flags: 0x0},
+ 46: {lang: 0x10d, script: 0x5b, flags: 0x0},
+ 48: {lang: 0x367, script: 0x5b, flags: 0x0},
+ 49: {lang: 0x444, script: 0x5b, flags: 0x0},
+ 50: {lang: 0x58, script: 0x5b, flags: 0x0},
+ 51: {lang: 0x6, script: 0x2, flags: 0x1},
+ 53: {lang: 0xa5, script: 0xe, flags: 0x0},
+ 54: {lang: 0x367, script: 0x5b, flags: 0x0},
+ 55: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 56: {lang: 0x7e, script: 0x20, flags: 0x0},
+ 57: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 58: {lang: 0x3d9, script: 0x5b, flags: 0x0},
+ 59: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 60: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 62: {lang: 0x31f, script: 0x5b, flags: 0x0},
+ 63: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 64: {lang: 0x3a1, script: 0x5b, flags: 0x0},
+ 65: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 67: {lang: 0x8, script: 0x2, flags: 0x1},
+ 69: {lang: 0x0, script: 0x5b, flags: 0x0},
+ 71: {lang: 0x71, script: 0x20, flags: 0x0},
+ 73: {lang: 0x512, script: 0x3e, flags: 0x2},
+ 74: {lang: 0x31f, script: 0x5, flags: 0x2},
+ 75: {lang: 0x445, script: 0x5b, flags: 0x0},
+ 76: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 77: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 78: {lang: 0x10d, script: 0x5b, flags: 0x0},
+ 79: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 81: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 82: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 83: {lang: 0xa, script: 0x4, flags: 0x1},
+ 84: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 85: {lang: 0x0, script: 0x5b, flags: 0x0},
+ 87: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 90: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 91: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 92: {lang: 0x3a1, script: 0x5b, flags: 0x0},
+ 94: {lang: 0xe, script: 0x2, flags: 0x1},
+ 95: {lang: 0xfa, script: 0x5b, flags: 0x0},
+ 97: {lang: 0x10d, script: 0x5b, flags: 0x0},
+ 99: {lang: 0x1, script: 0x5b, flags: 0x0},
+ 100: {lang: 0x101, script: 0x5b, flags: 0x0},
+ 102: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 104: {lang: 0x10, script: 0x2, flags: 0x1},
+ 105: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 106: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 107: {lang: 0x140, script: 0x5b, flags: 0x0},
+ 108: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 109: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 110: {lang: 0x46f, script: 0x2c, flags: 0x0},
+ 111: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 112: {lang: 0x12, script: 0x2, flags: 0x1},
+ 114: {lang: 0x10d, script: 0x5b, flags: 0x0},
+ 115: {lang: 0x151, script: 0x5b, flags: 0x0},
+ 116: {lang: 0x1c0, script: 0x22, flags: 0x2},
+ 119: {lang: 0x158, script: 0x5b, flags: 0x0},
+ 121: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 123: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 124: {lang: 0x14, script: 0x2, flags: 0x1},
+ 126: {lang: 0x16, script: 0x3, flags: 0x1},
+ 127: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 129: {lang: 0x21, script: 0x5b, flags: 0x0},
+ 131: {lang: 0x245, script: 0x5b, flags: 0x0},
+ 133: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 134: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 135: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 136: {lang: 0x19, script: 0x2, flags: 0x1},
+ 137: {lang: 0x0, script: 0x5b, flags: 0x0},
+ 138: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 140: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 142: {lang: 0x529, script: 0x3c, flags: 0x0},
+ 143: {lang: 0x0, script: 0x5b, flags: 0x0},
+ 144: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 145: {lang: 0x1d1, script: 0x5b, flags: 0x0},
+ 146: {lang: 0x1d4, script: 0x5b, flags: 0x0},
+ 147: {lang: 0x1d5, script: 0x5b, flags: 0x0},
+ 149: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 150: {lang: 0x1b, script: 0x2, flags: 0x1},
+ 152: {lang: 0x1bc, script: 0x3e, flags: 0x0},
+ 154: {lang: 0x1d, script: 0x3, flags: 0x1},
+ 156: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 157: {lang: 0x20, script: 0x2, flags: 0x1},
+ 158: {lang: 0x1f8, script: 0x5b, flags: 0x0},
+ 159: {lang: 0x1f9, script: 0x5b, flags: 0x0},
+ 162: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 163: {lang: 0x200, script: 0x49, flags: 0x0},
+ 165: {lang: 0x445, script: 0x5b, flags: 0x0},
+ 166: {lang: 0x28a, script: 0x20, flags: 0x0},
+ 167: {lang: 0x22, script: 0x3, flags: 0x1},
+ 169: {lang: 0x25, script: 0x2, flags: 0x1},
+ 171: {lang: 0x254, script: 0x54, flags: 0x0},
+ 172: {lang: 0x254, script: 0x54, flags: 0x0},
+ 173: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 175: {lang: 0x3e2, script: 0x20, flags: 0x0},
+ 176: {lang: 0x27, script: 0x2, flags: 0x1},
+ 177: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 179: {lang: 0x10d, script: 0x5b, flags: 0x0},
+ 180: {lang: 0x40c, script: 0xd6, flags: 0x0},
+ 182: {lang: 0x43b, script: 0x5b, flags: 0x0},
+ 183: {lang: 0x2c0, script: 0x5b, flags: 0x0},
+ 184: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 185: {lang: 0x2c7, script: 0x5b, flags: 0x0},
+ 186: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 187: {lang: 0x29, script: 0x2, flags: 0x1},
+ 188: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 189: {lang: 0x2b, script: 0x2, flags: 0x1},
+ 190: {lang: 0x432, script: 0x5b, flags: 0x0},
+ 191: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 192: {lang: 0x2f1, script: 0x5b, flags: 0x0},
+ 195: {lang: 0x2d, script: 0x2, flags: 0x1},
+ 196: {lang: 0xa0, script: 0x5b, flags: 0x0},
+ 197: {lang: 0x2f, script: 0x2, flags: 0x1},
+ 198: {lang: 0x31, script: 0x2, flags: 0x1},
+ 199: {lang: 0x33, script: 0x2, flags: 0x1},
+ 201: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 202: {lang: 0x35, script: 0x2, flags: 0x1},
+ 204: {lang: 0x320, script: 0x5b, flags: 0x0},
+ 205: {lang: 0x37, script: 0x3, flags: 0x1},
+ 206: {lang: 0x128, script: 0xed, flags: 0x0},
+ 208: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 209: {lang: 0x31f, script: 0x5b, flags: 0x0},
+ 210: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 211: {lang: 0x16, script: 0x5b, flags: 0x0},
+ 212: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 213: {lang: 0x1b4, script: 0x5b, flags: 0x0},
+ 215: {lang: 0x1b4, script: 0x5, flags: 0x2},
+ 217: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 218: {lang: 0x367, script: 0x5b, flags: 0x0},
+ 219: {lang: 0x347, script: 0x5b, flags: 0x0},
+ 220: {lang: 0x351, script: 0x22, flags: 0x0},
+ 226: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 227: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 229: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 230: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 231: {lang: 0x486, script: 0x5b, flags: 0x0},
+ 232: {lang: 0x153, script: 0x5b, flags: 0x0},
+ 233: {lang: 0x3a, script: 0x3, flags: 0x1},
+ 234: {lang: 0x3b3, script: 0x5b, flags: 0x0},
+ 235: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 237: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 238: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 239: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 241: {lang: 0x3a2, script: 0x5b, flags: 0x0},
+ 242: {lang: 0x194, script: 0x5b, flags: 0x0},
+ 244: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 259: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 261: {lang: 0x3d, script: 0x2, flags: 0x1},
+ 262: {lang: 0x432, script: 0x20, flags: 0x0},
+ 263: {lang: 0x3f, script: 0x2, flags: 0x1},
+ 264: {lang: 0x3e5, script: 0x5b, flags: 0x0},
+ 265: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 267: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 268: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 269: {lang: 0x41, script: 0x2, flags: 0x1},
+ 272: {lang: 0x416, script: 0x5b, flags: 0x0},
+ 273: {lang: 0x347, script: 0x5b, flags: 0x0},
+ 274: {lang: 0x43, script: 0x2, flags: 0x1},
+ 276: {lang: 0x1f9, script: 0x5b, flags: 0x0},
+ 277: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 278: {lang: 0x429, script: 0x5b, flags: 0x0},
+ 279: {lang: 0x367, script: 0x5b, flags: 0x0},
+ 281: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 283: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 285: {lang: 0x45, script: 0x2, flags: 0x1},
+ 289: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 290: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 291: {lang: 0x47, script: 0x2, flags: 0x1},
+ 292: {lang: 0x49, script: 0x3, flags: 0x1},
+ 293: {lang: 0x4c, script: 0x2, flags: 0x1},
+ 294: {lang: 0x477, script: 0x5b, flags: 0x0},
+ 295: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 296: {lang: 0x476, script: 0x5b, flags: 0x0},
+ 297: {lang: 0x4e, script: 0x2, flags: 0x1},
+ 298: {lang: 0x482, script: 0x5b, flags: 0x0},
+ 300: {lang: 0x50, script: 0x4, flags: 0x1},
+ 302: {lang: 0x4a0, script: 0x5b, flags: 0x0},
+ 303: {lang: 0x54, script: 0x2, flags: 0x1},
+ 304: {lang: 0x445, script: 0x5b, flags: 0x0},
+ 305: {lang: 0x56, script: 0x3, flags: 0x1},
+ 306: {lang: 0x445, script: 0x5b, flags: 0x0},
+ 310: {lang: 0x512, script: 0x3e, flags: 0x2},
+ 311: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 312: {lang: 0x4bc, script: 0x5b, flags: 0x0},
+ 313: {lang: 0x1f9, script: 0x5b, flags: 0x0},
+ 316: {lang: 0x13e, script: 0x5b, flags: 0x0},
+ 319: {lang: 0x4c3, script: 0x5b, flags: 0x0},
+ 320: {lang: 0x8a, script: 0x5b, flags: 0x0},
+ 321: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 323: {lang: 0x41b, script: 0x5b, flags: 0x0},
+ 334: {lang: 0x59, script: 0x2, flags: 0x1},
+ 351: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 352: {lang: 0x5b, script: 0x2, flags: 0x1},
+ 357: {lang: 0x423, script: 0x5b, flags: 0x0},
+}
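+
+// Illustrative note (not part of the generated file): an entry whose flags
+// field has the list bit (0x1) set does not hold a language/script pair
+// directly; its lang field is an index into likelyRegionList and its script
+// field is the length of that list. For example, entry 36 above
+// ({lang: 0x0, script: 0x2, flags: 0x1}) refers to likelyRegionList[0:2].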
+
+// likelyRegionList holds the lists of likely language/script entries that
+// likelyRegion refers to by index and size.
+// Size: 558 bytes, 93 elements
+var likelyRegionList = [93]likelyLangScript{
+ 0: {lang: 0x148, script: 0x5, flags: 0x0},
+ 1: {lang: 0x476, script: 0x5b, flags: 0x0},
+ 2: {lang: 0x431, script: 0x5b, flags: 0x0},
+ 3: {lang: 0x2ff, script: 0x20, flags: 0x0},
+ 4: {lang: 0x1d7, script: 0x8, flags: 0x0},
+ 5: {lang: 0x274, script: 0x5b, flags: 0x0},
+ 6: {lang: 0xb7, script: 0x5b, flags: 0x0},
+ 7: {lang: 0x432, script: 0x20, flags: 0x0},
+ 8: {lang: 0x12d, script: 0xef, flags: 0x0},
+ 9: {lang: 0x351, script: 0x22, flags: 0x0},
+ 10: {lang: 0x529, script: 0x3b, flags: 0x0},
+ 11: {lang: 0x4ac, script: 0x5, flags: 0x0},
+ 12: {lang: 0x523, script: 0x5b, flags: 0x0},
+ 13: {lang: 0x29a, script: 0xee, flags: 0x0},
+ 14: {lang: 0x136, script: 0x34, flags: 0x0},
+ 15: {lang: 0x48a, script: 0x5b, flags: 0x0},
+ 16: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 17: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 18: {lang: 0x27, script: 0x2c, flags: 0x0},
+ 19: {lang: 0x139, script: 0x5b, flags: 0x0},
+ 20: {lang: 0x26a, script: 0x5, flags: 0x2},
+ 21: {lang: 0x512, script: 0x3e, flags: 0x2},
+ 22: {lang: 0x210, script: 0x2e, flags: 0x0},
+ 23: {lang: 0x5, script: 0x20, flags: 0x0},
+ 24: {lang: 0x274, script: 0x5b, flags: 0x0},
+ 25: {lang: 0x136, script: 0x34, flags: 0x0},
+ 26: {lang: 0x2ff, script: 0x20, flags: 0x0},
+ 27: {lang: 0x1e1, script: 0x5b, flags: 0x0},
+ 28: {lang: 0x31f, script: 0x5, flags: 0x0},
+ 29: {lang: 0x1be, script: 0x22, flags: 0x0},
+ 30: {lang: 0x4b4, script: 0x5, flags: 0x0},
+ 31: {lang: 0x236, script: 0x76, flags: 0x0},
+ 32: {lang: 0x148, script: 0x5, flags: 0x0},
+ 33: {lang: 0x476, script: 0x5b, flags: 0x0},
+ 34: {lang: 0x24a, script: 0x4f, flags: 0x0},
+ 35: {lang: 0xe6, script: 0x5, flags: 0x0},
+ 36: {lang: 0x226, script: 0xee, flags: 0x0},
+ 37: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 38: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 39: {lang: 0x2b8, script: 0x58, flags: 0x0},
+ 40: {lang: 0x226, script: 0xee, flags: 0x0},
+ 41: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 42: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 43: {lang: 0x3dc, script: 0x5b, flags: 0x0},
+ 44: {lang: 0x4ae, script: 0x20, flags: 0x0},
+ 45: {lang: 0x2ff, script: 0x20, flags: 0x0},
+ 46: {lang: 0x431, script: 0x5b, flags: 0x0},
+ 47: {lang: 0x331, script: 0x76, flags: 0x0},
+ 48: {lang: 0x213, script: 0x5b, flags: 0x0},
+ 49: {lang: 0x30b, script: 0x20, flags: 0x0},
+ 50: {lang: 0x242, script: 0x5, flags: 0x0},
+ 51: {lang: 0x529, script: 0x3c, flags: 0x0},
+ 52: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+ 53: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 54: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 55: {lang: 0x2ed, script: 0x5b, flags: 0x0},
+ 56: {lang: 0x4b4, script: 0x5, flags: 0x0},
+ 57: {lang: 0x88, script: 0x22, flags: 0x0},
+ 58: {lang: 0x4b4, script: 0x5, flags: 0x0},
+ 59: {lang: 0x4b4, script: 0x5, flags: 0x0},
+ 60: {lang: 0xbe, script: 0x22, flags: 0x0},
+ 61: {lang: 0x3dc, script: 0x5b, flags: 0x0},
+ 62: {lang: 0x7e, script: 0x20, flags: 0x0},
+ 63: {lang: 0x3e2, script: 0x20, flags: 0x0},
+ 64: {lang: 0x267, script: 0x5b, flags: 0x0},
+ 65: {lang: 0x444, script: 0x5b, flags: 0x0},
+ 66: {lang: 0x512, script: 0x3e, flags: 0x0},
+ 67: {lang: 0x412, script: 0x5b, flags: 0x0},
+ 68: {lang: 0x4ae, script: 0x20, flags: 0x0},
+ 69: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 70: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 71: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 72: {lang: 0x35, script: 0x5, flags: 0x0},
+ 73: {lang: 0x46b, script: 0xee, flags: 0x0},
+ 74: {lang: 0x2ec, script: 0x5, flags: 0x0},
+ 75: {lang: 0x30f, script: 0x76, flags: 0x0},
+ 76: {lang: 0x467, script: 0x20, flags: 0x0},
+ 77: {lang: 0x148, script: 0x5, flags: 0x0},
+ 78: {lang: 0x3a, script: 0x5, flags: 0x0},
+ 79: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 80: {lang: 0x48a, script: 0x5b, flags: 0x0},
+ 81: {lang: 0x58, script: 0x5, flags: 0x0},
+ 82: {lang: 0x219, script: 0x20, flags: 0x0},
+ 83: {lang: 0x81, script: 0x34, flags: 0x0},
+ 84: {lang: 0x529, script: 0x3c, flags: 0x0},
+ 85: {lang: 0x48c, script: 0x5b, flags: 0x0},
+ 86: {lang: 0x4ae, script: 0x20, flags: 0x0},
+ 87: {lang: 0x512, script: 0x3e, flags: 0x0},
+ 88: {lang: 0x3b3, script: 0x5b, flags: 0x0},
+ 89: {lang: 0x431, script: 0x5b, flags: 0x0},
+ 90: {lang: 0x432, script: 0x20, flags: 0x0},
+ 91: {lang: 0x15e, script: 0x5b, flags: 0x0},
+ 92: {lang: 0x446, script: 0x5, flags: 0x0},
+}
+
+type likelyTag struct {
+ lang uint16
+ region uint16
+ script uint16
+}
+
+// Size: 198 bytes, 33 elements
+var likelyRegionGroup = [33]likelyTag{
+ 1: {lang: 0x139, region: 0xd7, script: 0x5b},
+ 2: {lang: 0x139, region: 0x136, script: 0x5b},
+ 3: {lang: 0x3c0, region: 0x41, script: 0x5b},
+ 4: {lang: 0x139, region: 0x2f, script: 0x5b},
+ 5: {lang: 0x139, region: 0xd7, script: 0x5b},
+ 6: {lang: 0x13e, region: 0xd0, script: 0x5b},
+ 7: {lang: 0x445, region: 0x130, script: 0x5b},
+ 8: {lang: 0x3a, region: 0x6c, script: 0x5},
+ 9: {lang: 0x445, region: 0x4b, script: 0x5b},
+ 10: {lang: 0x139, region: 0x162, script: 0x5b},
+ 11: {lang: 0x139, region: 0x136, script: 0x5b},
+ 12: {lang: 0x139, region: 0x136, script: 0x5b},
+ 13: {lang: 0x13e, region: 0x5a, script: 0x5b},
+ 14: {lang: 0x529, region: 0x53, script: 0x3b},
+ 15: {lang: 0x1be, region: 0x9a, script: 0x22},
+ 16: {lang: 0x1e1, region: 0x96, script: 0x5b},
+ 17: {lang: 0x1f9, region: 0x9f, script: 0x5b},
+ 18: {lang: 0x139, region: 0x2f, script: 0x5b},
+ 19: {lang: 0x139, region: 0xe7, script: 0x5b},
+ 20: {lang: 0x139, region: 0x8b, script: 0x5b},
+ 21: {lang: 0x41b, region: 0x143, script: 0x5b},
+ 22: {lang: 0x529, region: 0x53, script: 0x3b},
+ 23: {lang: 0x4bc, region: 0x138, script: 0x5b},
+ 24: {lang: 0x3a, region: 0x109, script: 0x5},
+ 25: {lang: 0x3e2, region: 0x107, script: 0x20},
+ 26: {lang: 0x3e2, region: 0x107, script: 0x20},
+ 27: {lang: 0x139, region: 0x7c, script: 0x5b},
+ 28: {lang: 0x10d, region: 0x61, script: 0x5b},
+ 29: {lang: 0x139, region: 0xd7, script: 0x5b},
+ 30: {lang: 0x13e, region: 0x1f, script: 0x5b},
+ 31: {lang: 0x139, region: 0x9b, script: 0x5b},
+ 32: {lang: 0x139, region: 0x7c, script: 0x5b},
+}
+
+// Size: 264 bytes, 33 elements
+var regionContainment = [33]uint64{
+ // Entry 0 - 1F
+ 0x00000001ffffffff, 0x00000000200007a2, 0x0000000000003044, 0x0000000000000008,
+ 0x00000000803c0010, 0x0000000000000020, 0x0000000000000040, 0x0000000000000080,
+ 0x0000000000000100, 0x0000000000000200, 0x0000000000000400, 0x000000004000384c,
+ 0x0000000000001000, 0x0000000000002000, 0x0000000000004000, 0x0000000000008000,
+ 0x0000000000010000, 0x0000000000020000, 0x0000000000040000, 0x0000000000080000,
+ 0x0000000000100000, 0x0000000000200000, 0x0000000001c1c000, 0x0000000000800000,
+ 0x0000000001000000, 0x000000001e020000, 0x0000000004000000, 0x0000000008000000,
+ 0x0000000010000000, 0x00000000200006a0, 0x0000000040002048, 0x0000000080000000,
+ // Entry 20 - 3F
+ 0x0000000100000000,
+}
+
+// regionInclusion maps region identifiers to sets of regions in regionInclusionBits,
+// where each set holds all groupings that are directly connected in a region
+// containment graph.
+// Size: 359 bytes, 359 elements
+var regionInclusion = [359]uint8{
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
+ 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x23,
+ 0x24, 0x26, 0x27, 0x22, 0x28, 0x29, 0x2a, 0x2b,
+ 0x26, 0x2c, 0x24, 0x23, 0x26, 0x25, 0x2a, 0x2d,
+ 0x2e, 0x24, 0x2f, 0x2d, 0x26, 0x30, 0x31, 0x28,
+ // Entry 40 - 7F
+ 0x26, 0x28, 0x26, 0x25, 0x31, 0x22, 0x32, 0x33,
+ 0x34, 0x30, 0x22, 0x27, 0x27, 0x27, 0x35, 0x2d,
+ 0x29, 0x28, 0x27, 0x36, 0x28, 0x22, 0x21, 0x34,
+ 0x23, 0x21, 0x26, 0x2d, 0x26, 0x22, 0x37, 0x2e,
+ 0x35, 0x2a, 0x22, 0x2f, 0x38, 0x26, 0x26, 0x21,
+ 0x39, 0x39, 0x28, 0x38, 0x39, 0x39, 0x2f, 0x3a,
+ 0x2f, 0x20, 0x21, 0x38, 0x3b, 0x28, 0x3c, 0x2c,
+ 0x21, 0x2a, 0x35, 0x27, 0x38, 0x26, 0x24, 0x28,
+ // Entry 80 - BF
+ 0x2c, 0x2d, 0x23, 0x30, 0x2d, 0x2d, 0x26, 0x27,
+ 0x3a, 0x22, 0x34, 0x3c, 0x2d, 0x28, 0x36, 0x22,
+ 0x34, 0x3a, 0x26, 0x2e, 0x21, 0x39, 0x31, 0x38,
+ 0x24, 0x2c, 0x25, 0x22, 0x24, 0x25, 0x2c, 0x3a,
+ 0x2c, 0x26, 0x24, 0x36, 0x21, 0x2f, 0x3d, 0x31,
+ 0x3c, 0x2f, 0x26, 0x36, 0x36, 0x24, 0x26, 0x3d,
+ 0x31, 0x24, 0x26, 0x35, 0x25, 0x2d, 0x32, 0x38,
+ 0x2a, 0x38, 0x39, 0x39, 0x35, 0x33, 0x23, 0x26,
+ // Entry C0 - FF
+ 0x2f, 0x3c, 0x21, 0x23, 0x2d, 0x31, 0x36, 0x36,
+ 0x3c, 0x26, 0x2d, 0x26, 0x3a, 0x2f, 0x25, 0x2f,
+ 0x34, 0x31, 0x2f, 0x32, 0x3b, 0x2d, 0x2b, 0x2d,
+ 0x21, 0x34, 0x2a, 0x2c, 0x25, 0x21, 0x3c, 0x24,
+ 0x29, 0x2b, 0x24, 0x34, 0x21, 0x28, 0x29, 0x3b,
+ 0x31, 0x25, 0x2e, 0x30, 0x29, 0x26, 0x24, 0x3a,
+ 0x21, 0x3c, 0x28, 0x21, 0x24, 0x21, 0x21, 0x1f,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ // Entry 100 - 13F
+ 0x21, 0x21, 0x21, 0x2f, 0x21, 0x2e, 0x23, 0x33,
+ 0x2f, 0x24, 0x3b, 0x2f, 0x39, 0x38, 0x31, 0x2d,
+ 0x3a, 0x2c, 0x2e, 0x2d, 0x23, 0x2d, 0x2f, 0x28,
+ 0x2f, 0x27, 0x33, 0x34, 0x26, 0x24, 0x32, 0x22,
+ 0x26, 0x27, 0x22, 0x2d, 0x31, 0x3d, 0x29, 0x31,
+ 0x3d, 0x39, 0x29, 0x31, 0x24, 0x26, 0x29, 0x36,
+ 0x2f, 0x33, 0x2f, 0x21, 0x22, 0x21, 0x30, 0x28,
+ 0x3d, 0x23, 0x26, 0x21, 0x28, 0x26, 0x26, 0x31,
+ // Entry 140 - 17F
+ 0x3b, 0x29, 0x21, 0x29, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x23, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x24, 0x24,
+ 0x2f, 0x23, 0x32, 0x2f, 0x27, 0x2f, 0x21,
+}
+
+// regionInclusionBits is an array of bit vectors where every vector represents
+// a set of region groupings. These sets are used to compute the distance
+// between two regions for the purpose of language matching.
+// Size: 584 bytes, 73 elements
+var regionInclusionBits = [73]uint64{
+ // Entry 0 - 1F
+ 0x0000000102400813, 0x00000000200007a3, 0x0000000000003844, 0x0000000040000808,
+ 0x00000000803c0011, 0x0000000020000022, 0x0000000040000844, 0x0000000020000082,
+ 0x0000000000000102, 0x0000000020000202, 0x0000000020000402, 0x000000004000384d,
+ 0x0000000000001804, 0x0000000040002804, 0x0000000000404000, 0x0000000000408000,
+ 0x0000000000410000, 0x0000000002020000, 0x0000000000040010, 0x0000000000080010,
+ 0x0000000000100010, 0x0000000000200010, 0x0000000001c1c001, 0x0000000000c00000,
+ 0x0000000001400000, 0x000000001e020001, 0x0000000006000000, 0x000000000a000000,
+ 0x0000000012000000, 0x00000000200006a2, 0x0000000040002848, 0x0000000080000010,
+ // Entry 20 - 3F
+ 0x0000000100000001, 0x0000000000000001, 0x0000000080000000, 0x0000000000020000,
+ 0x0000000001000000, 0x0000000000008000, 0x0000000000002000, 0x0000000000000200,
+ 0x0000000000000008, 0x0000000000200000, 0x0000000110000000, 0x0000000000040000,
+ 0x0000000008000000, 0x0000000000000020, 0x0000000104000000, 0x0000000000000080,
+ 0x0000000000001000, 0x0000000000010000, 0x0000000000000400, 0x0000000004000000,
+ 0x0000000000000040, 0x0000000010000000, 0x0000000000004000, 0x0000000101000000,
+ 0x0000000108000000, 0x0000000000000100, 0x0000000100020000, 0x0000000000080000,
+ 0x0000000000100000, 0x0000000000800000, 0x00000001ffffffff, 0x0000000122400fb3,
+ // Entry 40 - 5F
+ 0x00000001827c0813, 0x000000014240385f, 0x0000000103c1c813, 0x000000011e420813,
+ 0x0000000112000001, 0x0000000106000001, 0x0000000101400001, 0x000000010a000001,
+ 0x0000000102020001,
+}
+
+// regionInclusionNext marks, for each entry in regionInclusionBits, the set of
+// all groups that are reachable from the groups set in the respective entry.
+// Size: 73 bytes, 73 elements
+var regionInclusionNext = [73]uint8{
+ // Entry 0 - 3F
+ 0x3e, 0x3f, 0x0b, 0x0b, 0x40, 0x01, 0x0b, 0x01,
+ 0x01, 0x01, 0x01, 0x41, 0x0b, 0x0b, 0x16, 0x16,
+ 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x42, 0x16,
+ 0x16, 0x43, 0x19, 0x19, 0x19, 0x01, 0x0b, 0x04,
+ 0x00, 0x00, 0x1f, 0x11, 0x18, 0x0f, 0x0d, 0x09,
+ 0x03, 0x15, 0x44, 0x12, 0x1b, 0x05, 0x45, 0x07,
+ 0x0c, 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x46,
+ 0x47, 0x08, 0x48, 0x13, 0x14, 0x17, 0x3e, 0x3e,
+ // Entry 40 - 7F
+ 0x3e, 0x3e, 0x3e, 0x3e, 0x43, 0x43, 0x42, 0x43,
+ 0x43,
+}
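+
+// Illustrative note (not part of the generated file): these three tables work
+// together. regionInclusion maps a region index to an entry in
+// regionInclusionBits; that entry is the bit vector of groupings containing
+// the region; and regionInclusionNext names, for each bit vector, the entry
+// holding the groups reachable from it, which lets the matcher walk outward
+// through the containment graph when computing region distance.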
+
+type parentRel struct {
+ lang uint16
+ script uint16
+ maxScript uint16
+ toRegion uint16
+ fromRegion []uint16
+}
+
+// Size: 414 bytes, 5 elements
+var parents = [5]parentRel{
+ 0: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x25, 0x26, 0x2f, 0x34, 0x36, 0x3d, 0x42, 0x46, 0x48, 0x49, 0x4a, 0x50, 0x52, 0x5d, 0x5e, 0x62, 0x65, 0x6e, 0x74, 0x75, 0x76, 0x7c, 0x7d, 0x80, 0x81, 0x82, 0x84, 0x8d, 0x8e, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0xa0, 0xa1, 0xa5, 0xa8, 0xaa, 0xae, 0xb2, 0xb5, 0xb6, 0xc0, 0xc7, 0xcb, 0xcc, 0xcd, 0xcf, 0xd1, 0xd3, 0xd6, 0xd7, 0xde, 0xe0, 0xe1, 0xe7, 0xe8, 0xe9, 0xec, 0xf1, 0x108, 0x10a, 0x10b, 0x10c, 0x10e, 0x10f, 0x113, 0x118, 0x11c, 0x11e, 0x120, 0x126, 0x12a, 0x12d, 0x12e, 0x130, 0x132, 0x13a, 0x13d, 0x140, 0x143, 0x162, 0x163, 0x165}},
+ 1: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1a, fromRegion: []uint16{0x2e, 0x4e, 0x61, 0x64, 0x73, 0xda, 0x10d, 0x110}},
+ 2: {lang: 0x13e, script: 0x0, maxScript: 0x5b, toRegion: 0x1f, fromRegion: []uint16{0x2c, 0x3f, 0x41, 0x48, 0x51, 0x54, 0x57, 0x5a, 0x66, 0x6a, 0x8a, 0x90, 0xd0, 0xd9, 0xe3, 0xe5, 0xed, 0xf2, 0x11b, 0x136, 0x137, 0x13c}},
+ 3: {lang: 0x3c0, script: 0x0, maxScript: 0x5b, toRegion: 0xef, fromRegion: []uint16{0x2a, 0x4e, 0x5b, 0x87, 0x8c, 0xb8, 0xc7, 0xd2, 0x119, 0x127}},
+ 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8e, fromRegion: []uint16{0xc7}},
+}
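+
+// Illustrative note (not part of the generated file): each parentRel encodes a
+// CLDR parentLocale rule: a tag with the given lang whose (maximized) script
+// matches maxScript and whose region is in fromRegion inherits from the same
+// language in toRegion, rather than from the language-only tag.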
+
+// Total table size 30466 bytes (29KiB); checksum: 7544152B
diff --git a/vendor/golang.org/x/text/internal/language/tags.go b/vendor/golang.org/x/text/internal/language/tags.go
new file mode 100644
index 000000000..e7afd3188
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/tags.go
@@ -0,0 +1,48 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
+// It simplifies safe initialization of Tag values.
+func MustParse(s string) Tag {
+ t, err := Parse(s)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
+// It simplifies safe initialization of Base values.
+func MustParseBase(s string) Language {
+ b, err := ParseBase(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+// MustParseScript is like ParseScript, but panics if the given script cannot be
+// parsed. It simplifies safe initialization of Script values.
+func MustParseScript(s string) Script {
+ scr, err := ParseScript(s)
+ if err != nil {
+ panic(err)
+ }
+ return scr
+}
+
+// MustParseRegion is like ParseRegion, but panics if the given region cannot be
+// parsed. It simplifies safe initialization of Region values.
+func MustParseRegion(s string) Region {
+ r, err := ParseRegion(s)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// Und is the root language.
+var Und Tag
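+
+// Illustrative sketch (not part of the upstream file): the Must* helpers exist
+// so that package-level values can be initialized from literals that are known
+// to be well formed, failing fast at init time if a literal is mistyped:
+//
+//	var fallback = MustParse("en-US") // panics during init if malformed
+//
+// For untrusted input, call Parse (or ParseBase, etc.) and handle the error.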
diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go
new file mode 100644
index 000000000..1cc004a6d
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/match.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file contains matchers that implement CLDR inheritance.
+//
+// See https://unicode.org/reports/tr35/#Locale_Inheritance.
+//
+// Some of the inheritance described in this document is already handled by
+// the cldr package.
+
+import (
+ "golang.org/x/text/language"
+)
+
+// TODO: consider whether (some of) the matching algorithm needs to be public
+// after getting a feel for what is generic and what is specific.
+
+// NewInheritanceMatcher returns a matcher that matches based on the inheritance
+// chain.
+//
+// The matcher uses canonicalization and the parent relationship to find a
+// match. The resulting match will always be either Und or a language with the
+// same language and script as the requested language. It will not match
+// languages for which there is understood to be mutual or one-directional
+// intelligibility.
+//
+// A Match will indicate an Exact match if the language matches after
+// canonicalization and High if the matched tag is a parent.
+func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
+ tags := &InheritanceMatcher{make(map[language.Tag]int)}
+ for i, tag := range t {
+ ct, err := language.All.Canonicalize(tag)
+ if err != nil {
+ ct = tag
+ }
+ tags.index[ct] = i
+ }
+ return tags
+}
+
+type InheritanceMatcher struct {
+ index map[language.Tag]int
+}
+
+func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
+ for _, t := range want {
+ ct, err := language.All.Canonicalize(t)
+ if err != nil {
+ ct = t
+ }
+ conf := language.Exact
+ for {
+ if index, ok := m.index[ct]; ok {
+ return ct, index, conf
+ }
+ if ct == language.Und {
+ break
+ }
+ ct = ct.Parent()
+ conf = language.High
+ }
+ }
+ return language.Und, 0, language.No
+}
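+
+// Illustrative sketch (not part of the upstream file): Match walks each
+// requested tag up its parent chain until it hits a supported tag, so a
+// regional variant resolves to its base language with High confidence:
+//
+//	m := NewInheritanceMatcher([]language.Tag{
+//		language.MustParse("en"),
+//		language.MustParse("pt"),
+//	})
+//	tag, index, conf := m.Match(language.MustParse("pt-BR"))
+//	// tag == "pt", index == 1, conf == language.High, assuming "pt-BR"'s
+//	// CLDR parent is "pt".
+//	_, _, _ = tag, index, conf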
diff --git a/vendor/golang.org/x/text/internal/number/common.go b/vendor/golang.org/x/text/internal/number/common.go
new file mode 100644
index 000000000..a6e9c8e0d
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/common.go
@@ -0,0 +1,55 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package number
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/language/compact"
+)
+
+// A system identifies a CLDR numbering system.
+type system byte
+
+type systemData struct {
+ id system
+ digitSize byte // number of UTF-8 bytes per digit
+ zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit.
+}
+
+// A SymbolType identifies a symbol of a specific kind.
+type SymbolType int
+
+const (
+ SymDecimal SymbolType = iota
+ SymGroup
+ SymList
+ SymPercentSign
+ SymPlusSign
+ SymMinusSign
+ SymExponential
+ SymSuperscriptingExponent
+ SymPerMille
+ SymInfinity
+ SymNan
+ SymTimeSeparator
+
+ NumSymbolTypes
+)
+
+const hasNonLatnMask = 0x8000
+
+// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask
+// is not 0 (with this bit masked out), and an offset into symIndex otherwise.
+//
+// TODO: this type can be a byte again if we use an indirection into altSymData
+// and introduce an alt -> offset slice (the length of this will be number of
+// alternatives plus 1). This also allows getting rid of the compactTag field
+// in altSymData. In total this will save about 1K.
+type symOffset uint16
+
+type altSymData struct {
+ compactTag compact.ID
+ symIndex symOffset
+ system system
+}
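+
+// Illustrative note (not part of the generated file): when a language's entry
+// in langToDefaults has hasNonLatnMask set, the remaining bits index langToAlt,
+// whose altSymData entries pair a numbering system with the symOffset to use;
+// see the resolution loop in InfoFromLangID in number.go.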
diff --git a/vendor/golang.org/x/text/internal/number/decimal.go b/vendor/golang.org/x/text/internal/number/decimal.go
new file mode 100644
index 000000000..e128cf343
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/decimal.go
@@ -0,0 +1,500 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate stringer -type RoundingMode
+
+package number
+
+import (
+ "math"
+ "strconv"
+)
+
+// RoundingMode determines how a number is rounded to the desired precision.
+type RoundingMode byte
+
+const (
+ ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant.
+ ToNearestZero // towards the nearest integer, or towards zero if equidistant.
+ ToNearestAway // towards the nearest integer, or away from zero if equidistant.
+ ToPositiveInf // towards infinity
+ ToNegativeInf // towards negative infinity
+ ToZero // towards zero
+ AwayFromZero // away from zero
+ numModes
+)
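+
+// Illustrative sketch (not part of the upstream file): for a tie such as 2.5,
+// the modes diverge as follows (see roundFloat below):
+//
+//	ToNearestEven // 2 (ties go to the even neighbor)
+//	ToNearestAway // 3 (ties go away from zero)
+//	ToNearestZero // 2 (ties go towards zero)
+//	ToZero        // 2 (always truncates)
+//	AwayFromZero  // 3 (always rounds away from zero)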
+
+const maxIntDigits = 20
+
+// A Decimal represents a floating point number in decimal format.
+// Digits represents a number [0, 1.0), and the absolute value represented by
+// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp
+// may point outside a valid position in Digits.
+//
+// Examples:
+//
+// Number Decimal
+// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5
+// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2
+// 12000 Digits: [1, 2], Exp: 5
+// 12000.00 Digits: [1, 2], Exp: 5
+// 0.00123 Digits: [1, 2, 3], Exp: -2
+// 0 Digits: [], Exp: 0
+type Decimal struct {
+ digits
+
+ buf [maxIntDigits]byte
+}
+
+type digits struct {
+ Digits []byte // mantissa digits, big-endian
+ Exp int32 // exponent
+ Neg bool
+ Inf bool // Takes precedence over Digits and Exp.
+ NaN bool // Takes precedence over Inf.
+}
+
+// Digits represents a floating point number expressed in the digits of the
+// base in which the number is to be displayed. It is similar to Decimal, but
+// keeps track of trailing fraction zeros and the comma placement for
+// engineering notation. Digits must have at least one digit.
+//
+// Examples:
+//
+// Number Decimal
+// decimal
+// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5
+// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5
+// 12000 Digits: [1, 2], Exp: 5 End: 5
+// 12000.00 Digits: [1, 2], Exp: 5 End: 7
+// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3
+// 0 Digits: [], Exp: 0 End: 1
+// scientific (actual exp is Exp - Comma)
+// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1
+// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0
+// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1
+// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1
+// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0
+// engineering
+// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2
+type Digits struct {
+ digits
+ // End indicates the end position of the number.
+ End int32 // For decimals Exp <= End. For scientific len(Digits) <= End.
+ // Comma is used for the comma position for scientific (always 0 or 1) and
+ // engineering notation (always 0, 1, 2, or 3).
+ Comma uint8
+ // IsScientific indicates whether this number is to be rendered as a
+ // scientific number.
+ IsScientific bool
+}
+
+func (d *Digits) NumFracDigits() int {
+ if d.Exp >= d.End {
+ return 0
+ }
+ return int(d.End - d.Exp)
+}
+
+// normalize returns a new Decimal with leading and trailing zeros removed.
+func (d *Decimal) normalize() (n Decimal) {
+ n = *d
+ b := n.Digits
+ // Strip leading zeros. Resulting number of digits is significant digits.
+ for len(b) > 0 && b[0] == 0 {
+ b = b[1:]
+ n.Exp--
+ }
+ // Strip trailing zeros
+ for len(b) > 0 && b[len(b)-1] == 0 {
+ b = b[:len(b)-1]
+ }
+ if len(b) == 0 {
+ n.Exp = 0
+ }
+ n.Digits = b
+ return n
+}
+
+func (d *Decimal) clear() {
+ b := d.Digits
+ if b == nil {
+ b = d.buf[:0]
+ }
+ *d = Decimal{}
+ d.Digits = b[:0]
+}
+
+func (x *Decimal) String() string {
+ if x.NaN {
+ return "NaN"
+ }
+ var buf []byte
+ if x.Neg {
+ buf = append(buf, '-')
+ }
+ if x.Inf {
+ buf = append(buf, "Inf"...)
+ return string(buf)
+ }
+ switch {
+ case len(x.Digits) == 0:
+ buf = append(buf, '0')
+ case x.Exp <= 0:
+ // 0.00ddd
+ buf = append(buf, "0."...)
+ buf = appendZeros(buf, -int(x.Exp))
+ buf = appendDigits(buf, x.Digits)
+
+ case /* 0 < */ int(x.Exp) < len(x.Digits):
+ // dd.ddd
+ buf = appendDigits(buf, x.Digits[:x.Exp])
+ buf = append(buf, '.')
+ buf = appendDigits(buf, x.Digits[x.Exp:])
+
+ default: // len(x.Digits) <= x.Exp
+ // ddd00
+ buf = appendDigits(buf, x.Digits)
+ buf = appendZeros(buf, int(x.Exp)-len(x.Digits))
+ }
+ return string(buf)
+}
+
+func appendDigits(buf []byte, digits []byte) []byte {
+ for _, c := range digits {
+ buf = append(buf, c+'0')
+ }
+ return buf
+}
+
+// appendZeros appends n 0 digits to buf and returns buf.
+func appendZeros(buf []byte, n int) []byte {
+ for ; n > 0; n-- {
+ buf = append(buf, '0')
+ }
+ return buf
+}
+
+func (d *digits) round(mode RoundingMode, n int) {
+ if n >= len(d.Digits) {
+ return
+ }
+ // Make rounding decision: The result mantissa is truncated ("rounded down")
+ // by default. Decide if we need to increment, or "round up", the (unsigned)
+ // mantissa.
+ inc := false
+ switch mode {
+ case ToNegativeInf:
+ inc = d.Neg
+ case ToPositiveInf:
+ inc = !d.Neg
+ case ToZero:
+ // nothing to do
+ case AwayFromZero:
+ inc = true
+ case ToNearestEven:
+ inc = d.Digits[n] > 5 || d.Digits[n] == 5 &&
+ (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0)
+ case ToNearestAway:
+ inc = d.Digits[n] >= 5
+ case ToNearestZero:
+ inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1
+ default:
+ panic("unreachable")
+ }
+ if inc {
+ d.roundUp(n)
+ } else {
+ d.roundDown(n)
+ }
+}
+
+// roundFloat rounds a floating point number.
+func (r RoundingMode) roundFloat(x float64) float64 {
+ // Make rounding decision: The result mantissa is truncated ("rounded down")
+ // by default. Decide if we need to increment, or "round up", the (unsigned)
+ // mantissa.
+ abs := x
+ if x < 0 {
+ abs = -x
+ }
+ i, f := math.Modf(abs)
+ if f == 0.0 {
+ return x
+ }
+ inc := false
+ switch r {
+ case ToNegativeInf:
+ inc = x < 0
+ case ToPositiveInf:
+ inc = x >= 0
+ case ToZero:
+ // nothing to do
+ case AwayFromZero:
+ inc = true
+ case ToNearestEven:
+ // TODO: check overflow
+ inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0
+ case ToNearestAway:
+ inc = f >= 0.5
+ case ToNearestZero:
+ inc = f > 0.5
+ default:
+ panic("unreachable")
+ }
+ if inc {
+ i += 1
+ }
+ if abs != x {
+ i = -i
+ }
+ return i
+}
+
+func (x *digits) roundUp(n int) {
+ if n < 0 || n >= len(x.Digits) {
+ return // nothing to do
+ }
+ // find first digit < 9
+ for n > 0 && x.Digits[n-1] >= 9 {
+ n--
+ }
+
+ if n == 0 {
+ // all digits are 9s => round up to 1 and update exponent
+ x.Digits[0] = 1 // ok since len(x.Digits) > n
+ x.Digits = x.Digits[:1]
+ x.Exp++
+ return
+ }
+ x.Digits[n-1]++
+ x.Digits = x.Digits[:n]
+ // x already trimmed
+}
+
+func (x *digits) roundDown(n int) {
+ if n < 0 || n >= len(x.Digits) {
+ return // nothing to do
+ }
+ x.Digits = x.Digits[:n]
+ trim(x)
+}
+
+// trim cuts off any trailing zeros from x's mantissa;
+// they are meaningless for the value of x.
+func trim(x *digits) {
+ i := len(x.Digits)
+ for i > 0 && x.Digits[i-1] == 0 {
+ i--
+ }
+ x.Digits = x.Digits[:i]
+ if i == 0 {
+ x.Exp = 0
+ }
+}
+
+// A Converter converts a number into decimals according to the given rounding
+// criteria.
+type Converter interface {
+ Convert(d *Decimal, r RoundingContext)
+}
+
+const (
+ signed = true
+ unsigned = false
+)
+
+// Convert converts the given number to the decimal representation using the
+// supplied RoundingContext.
+func (d *Decimal) Convert(r RoundingContext, number interface{}) {
+ switch f := number.(type) {
+ case Converter:
+ d.clear()
+ f.Convert(d, r)
+ case float32:
+ d.ConvertFloat(r, float64(f), 32)
+ case float64:
+ d.ConvertFloat(r, f, 64)
+ case int:
+ d.ConvertInt(r, signed, uint64(f))
+ case int8:
+ d.ConvertInt(r, signed, uint64(f))
+ case int16:
+ d.ConvertInt(r, signed, uint64(f))
+ case int32:
+ d.ConvertInt(r, signed, uint64(f))
+ case int64:
+ d.ConvertInt(r, signed, uint64(f))
+ case uint:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint8:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint16:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint32:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint64:
+ d.ConvertInt(r, unsigned, f)
+
+ default:
+ d.NaN = true
+ // TODO:
+ // case string: if produced by strconv, allows for easy arbitrary pos.
+ // case reflect.Value:
+ // case big.Float
+ // case big.Int
+ // case big.Rat?
+	// catch underlying types using reflect, or will this already be done by
+	// the message package?
+ }
+}
+
+// ConvertInt converts an integer to decimals.
+func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) {
+ if r.Increment > 0 {
+ // TODO: if uint64 is too large, fall back to float64
+ if signed {
+ d.ConvertFloat(r, float64(int64(x)), 64)
+ } else {
+ d.ConvertFloat(r, float64(x), 64)
+ }
+ return
+ }
+ d.clear()
+ if signed && int64(x) < 0 {
+ x = uint64(-int64(x))
+ d.Neg = true
+ }
+ d.fillIntDigits(x)
+ d.Exp = int32(len(d.Digits))
+}
+
+// ConvertFloat converts a floating point number to decimals.
+func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) {
+ d.clear()
+ if math.IsNaN(x) {
+ d.NaN = true
+ return
+ }
+ // Simple case: decimal notation
+ if r.Increment > 0 {
+ scale := int(r.IncrementScale)
+ mult := 1.0
+ if scale >= len(scales) {
+ mult = math.Pow(10, float64(scale))
+ } else {
+ mult = scales[scale]
+ }
+ // We multiply x instead of dividing inc as it gives less rounding
+ // issues.
+ x *= mult
+ x /= float64(r.Increment)
+ x = r.Mode.roundFloat(x)
+ x *= float64(r.Increment)
+ x /= mult
+ }
+
+ abs := x
+ if x < 0 {
+ d.Neg = true
+ abs = -x
+ }
+ if math.IsInf(abs, 1) {
+ d.Inf = true
+ return
+ }
+
+ // By default we get the exact decimal representation.
+ verb := byte('g')
+ prec := -1
+ // As the strconv API does not return the rounding accuracy, we can only
+ // round using ToNearestEven.
+ if r.Mode == ToNearestEven {
+ if n := r.RoundSignificantDigits(); n >= 0 {
+ prec = n
+ } else if n = r.RoundFractionDigits(); n >= 0 {
+ prec = n
+ verb = 'f'
+ }
+ } else {
+ // TODO: At this point strconv's rounding is imprecise to the point that
+ // it is not usable for this purpose.
+ // See https://github.com/golang/go/issues/21714
+ // If rounding is requested, we ask for a large number of digits and
+ // round from there to simulate rounding only once.
+ // Ideally we would have strconv export an AppendDigits that would take
+ // a rounding mode and/or return an accuracy. Something like this would
+ // work:
+ // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int)
+ hasPrec := r.RoundSignificantDigits() >= 0
+ hasScale := r.RoundFractionDigits() >= 0
+ if hasPrec || hasScale {
+ // prec is the number of mantissa bits plus some extra for safety.
+ // We need at least the number of mantissa bits as decimals to
+ // accurately represent the floating point without rounding, as each
+ // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ...
+ prec = 60
+ }
+ }
+
+ b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size)
+ i := 0
+ k := 0
+ beforeDot := 1
+ for i < len(b) {
+ if c := b[i]; '0' <= c && c <= '9' {
+ b[k] = c - '0'
+ k++
+ d.Exp += int32(beforeDot)
+ } else if c == '.' {
+ beforeDot = 0
+ d.Exp = int32(k)
+ } else {
+ break
+ }
+ i++
+ }
+ d.Digits = b[:k]
+ if i != len(b) {
+ i += len("e")
+ pSign := i
+ exp := 0
+ for i++; i < len(b); i++ {
+ exp *= 10
+ exp += int(b[i] - '0')
+ }
+ if b[pSign] == '-' {
+ exp = -exp
+ }
+ d.Exp = int32(exp) + 1
+ }
+}
+
+func (d *Decimal) fillIntDigits(x uint64) {
+ if cap(d.Digits) < maxIntDigits {
+ d.Digits = d.buf[:]
+ } else {
+ d.Digits = d.buf[:maxIntDigits]
+ }
+ i := 0
+ for ; x > 0; x /= 10 {
+ d.Digits[i] = byte(x % 10)
+ i++
+ }
+ d.Digits = d.Digits[:i]
+ for p := 0; p < i; p++ {
+ i--
+ d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p]
+ }
+}
+
+var scales [70]float64
+
+func init() {
+ x := 1.0
+ for i := range scales {
+ scales[i] = x
+ x *= 10
+ }
+}
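+
+// Illustrative sketch (not part of the upstream file): with fraction rounding
+// disabled (MaxFractionDigits: -1, meaning no maximum), Convert produces the
+// normalized digit form documented on Decimal above:
+//
+//	var d Decimal
+//	d.Convert(RoundingContext{MaxFractionDigits: -1}, 12.345)
+//	// d.Digits == [1 2 3 4 5], d.Exp == 2, so d.String() == "12.345".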
diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go
new file mode 100644
index 000000000..1aadcf407
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/format.go
@@ -0,0 +1,533 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package number
+
+import (
+ "strconv"
+ "unicode/utf8"
+
+ "golang.org/x/text/language"
+)
+
+// TODO:
+// - grouping of fractions
+// - allow user-defined superscript notation (such as ⁴)
+// - same for non-breaking spaces, like U+00A0
+
+// A VisibleDigits computes digits, comma placement and trailing zeros as they
+// will be shown to the user.
+type VisibleDigits interface {
+ Digits(buf []byte, t language.Tag, scale int) Digits
+ // TODO: Do we also need to add the verb or pass a format.State?
+}
+
+// Formatting proceeds along the following lines:
+// 0) Compose rounding information from format and context.
+// 1) Convert a number into a Decimal.
+// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and
+// (non-increment) rounding. The Decimal that results from this is suitable
+// for determining the plural form.
+// 3) Render the Decimal in the localized form.
+
+// Formatter contains all the information needed to render a number.
+type Formatter struct {
+ Pattern
+ Info
+}
+
+func (f *Formatter) init(t language.Tag, index []uint8) {
+ f.Info = InfoFromTag(t)
+ f.Pattern = formats[index[tagToID(t)]]
+}
+
+// InitPattern initializes a Formatter for the given Pattern.
+func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) {
+ f.Info = InfoFromTag(t)
+ f.Pattern = *pat
+}
+
+// InitDecimal initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitDecimal(t language.Tag) {
+ f.init(t, tagToDecimal)
+}
+
+// InitScientific initializes a Formatter using the default Pattern for the
+// given language.
+func (f *Formatter) InitScientific(t language.Tag) {
+ f.init(t, tagToScientific)
+ f.Pattern.MinFractionDigits = 0
+ f.Pattern.MaxFractionDigits = -1
+}
+
+// InitEngineering initializes a Formatter using the default Pattern for the
+// given language.
+func (f *Formatter) InitEngineering(t language.Tag) {
+ f.init(t, tagToScientific)
+ f.Pattern.MinFractionDigits = 0
+ f.Pattern.MaxFractionDigits = -1
+ f.Pattern.MaxIntegerDigits = 3
+ f.Pattern.MinIntegerDigits = 1
+}
+
+// InitPercent initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitPercent(t language.Tag) {
+ f.init(t, tagToPercent)
+}
+
+// InitPerMille initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitPerMille(t language.Tag) {
+ f.init(t, tagToPercent)
+ f.Pattern.DigitShift = 3
+}
+
+func (f *Formatter) Append(dst []byte, x interface{}) []byte {
+ var d Decimal
+ r := f.RoundingContext
+ d.Convert(r, x)
+ return f.Render(dst, FormatDigits(&d, r))
+}
+
+func FormatDigits(d *Decimal, r RoundingContext) Digits {
+ if r.isScientific() {
+ return scientificVisibleDigits(r, d)
+ }
+ return decimalVisibleDigits(r, d)
+}
+
+func (f *Formatter) Format(dst []byte, d *Decimal) []byte {
+ return f.Render(dst, FormatDigits(d, f.RoundingContext))
+}
+
+func (f *Formatter) Render(dst []byte, d Digits) []byte {
+ var result []byte
+ var postPrefix, preSuffix int
+ if d.IsScientific {
+ result, postPrefix, preSuffix = appendScientific(dst, f, &d)
+ } else {
+ result, postPrefix, preSuffix = appendDecimal(dst, f, &d)
+ }
+ if f.PadRune == 0 {
+ return result
+ }
+ width := int(f.FormatWidth)
+ if count := utf8.RuneCount(result); count < width {
+ insertPos := 0
+ switch f.Flags & PadMask {
+ case PadAfterPrefix:
+ insertPos = postPrefix
+ case PadBeforeSuffix:
+ insertPos = preSuffix
+ case PadAfterSuffix:
+ insertPos = len(result)
+ }
+ num := width - count
+ pad := [utf8.UTFMax]byte{' '}
+ sz := 1
+ if r := f.PadRune; r != 0 {
+ sz = utf8.EncodeRune(pad[:], r)
+ }
+ extra := sz * num
+ if n := len(result) + extra; n < cap(result) {
+ result = result[:n]
+ copy(result[insertPos+extra:], result[insertPos:])
+ } else {
+ buf := make([]byte, n)
+ copy(buf, result[:insertPos])
+ copy(buf[insertPos+extra:], result[insertPos:])
+ result = buf
+ }
+ for ; num > 0; num-- {
+ insertPos += copy(result[insertPos:], pad[:sz])
+ }
+ }
+ return result
+}
+
+// decimalVisibleDigits converts d according to the RoundingContext. Note that
+// the exponent may change as a result of this operation.
+func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits {
+ if d.NaN || d.Inf {
+ return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}}
+ }
+ n := Digits{digits: d.normalize().digits}
+
+ exp := n.Exp
+ exp += int32(r.DigitShift)
+
+ // Cap integer digits. Remove *most-significant* digits.
+ if r.MaxIntegerDigits > 0 {
+ if p := int(exp) - int(r.MaxIntegerDigits); p > 0 {
+ if p > len(n.Digits) {
+ p = len(n.Digits)
+ }
+ if n.Digits = n.Digits[p:]; len(n.Digits) == 0 {
+ exp = 0
+ } else {
+ exp -= int32(p)
+ }
+ // Strip leading zeros.
+ for len(n.Digits) > 0 && n.Digits[0] == 0 {
+ n.Digits = n.Digits[1:]
+ exp--
+ }
+ }
+ }
+
+ // Rounding if not already done by Convert.
+ p := len(n.Digits)
+ if maxSig := int(r.MaxSignificantDigits); maxSig > 0 {
+ p = maxSig
+ }
+ if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 {
+ if cap := int(exp) + maxFrac; cap < p {
+ p = int(exp) + maxFrac
+ }
+ if p < 0 {
+ p = 0
+ }
+ }
+ n.round(r.Mode, p)
+
+ // set End (trailing zeros)
+ n.End = int32(len(n.Digits))
+ if n.End == 0 {
+ exp = 0
+ if r.MinFractionDigits > 0 {
+ n.End = int32(r.MinFractionDigits)
+ }
+ if p := int32(r.MinSignificantDigits) - 1; p > n.End {
+ n.End = p
+ }
+ } else {
+ if end := exp + int32(r.MinFractionDigits); end > n.End {
+ n.End = end
+ }
+ if n.End < int32(r.MinSignificantDigits) {
+ n.End = int32(r.MinSignificantDigits)
+ }
+ }
+ n.Exp = exp
+ return n
+}
+
+// appendDecimal appends a formatted number to dst. It returns two possible
+// insertion points for padding.
+func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) {
+ if dst, ok := f.renderSpecial(dst, n); ok {
+ return dst, 0, len(dst)
+ }
+ digits := n.Digits
+ exp := n.Exp
+
+ // Split in integer and fraction part.
+ var intDigits, fracDigits []byte
+ numInt := 0
+ numFrac := int(n.End - n.Exp)
+ if exp > 0 {
+ numInt = int(exp)
+ if int(exp) >= len(digits) { // ddddd | ddddd00
+ intDigits = digits
+ } else { // ddd.dd
+ intDigits = digits[:exp]
+ fracDigits = digits[exp:]
+ }
+ } else {
+ fracDigits = digits
+ }
+
+ neg := n.Neg
+ affix, suffix := f.getAffixes(neg)
+ dst = appendAffix(dst, f, affix, neg)
+ savedLen := len(dst)
+
+ minInt := int(f.MinIntegerDigits)
+ if minInt == 0 && f.MinSignificantDigits > 0 {
+ minInt = 1
+ }
+ // add leading zeros
+ for i := minInt; i > numInt; i-- {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ i := 0
+ for ; i < len(intDigits); i++ {
+ dst = f.AppendDigit(dst, intDigits[i])
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ for ; i < numInt; i++ {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+
+ if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 {
+ dst = append(dst, f.Symbol(SymDecimal)...)
+ }
+ // Add trailing zeros
+ i = 0
+ for n := -int(n.Exp); i < n; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ for _, d := range fracDigits {
+ i++
+ dst = f.AppendDigit(dst, d)
+ }
+ for ; i < numFrac; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ return appendAffix(dst, f, suffix, neg), savedLen, len(dst)
+}
+
+func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits {
+ if d.NaN || d.Inf {
+ return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}}
+ }
+ n := Digits{digits: d.normalize().digits, IsScientific: true}
+
+ // Normalize to have at least one digit. This simplifies engineering
+ // notation.
+ if len(n.Digits) == 0 {
+ n.Digits = append(n.Digits, 0)
+ n.Exp = 1
+ }
+
+ // Significant digits are transformed by the parser for scientific notation
+ // and do not need to be handled here.
+ maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits)
+ if numInt == 0 {
+ numInt = 1
+ }
+
+	// If a maximum number of integer digits is specified, the minimum must be 1
+	// and the exponent is grouped by this number (e.g. for engineering notation)
+ if maxInt > numInt {
+ // Correct the exponent to reflect a single integer digit.
+ numInt = 1
+ // engineering
+ // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3
+ // 12345 ([12345]e+5) -> 1.2345e4 12.345e3
+ d := int(n.Exp-1) % maxInt
+ if d < 0 {
+ d += maxInt
+ }
+ numInt += d
+ }
+
+ p := len(n.Digits)
+ if maxSig := int(r.MaxSignificantDigits); maxSig > 0 {
+ p = maxSig
+ }
+ if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p {
+ p = numInt + maxFrac
+ }
+ n.round(r.Mode, p)
+
+ n.Comma = uint8(numInt)
+ n.End = int32(len(n.Digits))
+ if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig {
+ n.End = minSig
+ }
+ return n
+}
+
+// appendScientific appends a formatted number to dst. It returns two possible
+// insertion points for padding.
+func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) {
+ if dst, ok := f.renderSpecial(dst, n); ok {
+ return dst, 0, 0
+ }
+ digits := n.Digits
+ numInt := int(n.Comma)
+ numFrac := int(n.End) - int(n.Comma)
+
+ var intDigits, fracDigits []byte
+ if numInt <= len(digits) {
+ intDigits = digits[:numInt]
+ fracDigits = digits[numInt:]
+ } else {
+ intDigits = digits
+ }
+ neg := n.Neg
+ affix, suffix := f.getAffixes(neg)
+ dst = appendAffix(dst, f, affix, neg)
+ savedLen := len(dst)
+
+ i := 0
+ for ; i < len(intDigits); i++ {
+ dst = f.AppendDigit(dst, intDigits[i])
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ for ; i < numInt; i++ {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+
+ if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 {
+ dst = append(dst, f.Symbol(SymDecimal)...)
+ }
+ i = 0
+ for ; i < len(fracDigits); i++ {
+ dst = f.AppendDigit(dst, fracDigits[i])
+ }
+ for ; i < numFrac; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+
+ // exp
+ buf := [12]byte{}
+ // TODO: use exponential if superscripting is not available (no Latin
+ // numbers or no tags) and use exponential in all other cases.
+ exp := n.Exp - int32(n.Comma)
+ exponential := f.Symbol(SymExponential)
+ if exponential == "E" {
+ dst = append(dst, f.Symbol(SymSuperscriptingExponent)...)
+ dst = f.AppendDigit(dst, 1)
+ dst = f.AppendDigit(dst, 0)
+ switch {
+ case exp < 0:
+ dst = append(dst, superMinus...)
+ exp = -exp
+ case f.Flags&AlwaysExpSign != 0:
+ dst = append(dst, superPlus...)
+ }
+ b = strconv.AppendUint(buf[:0], uint64(exp), 10)
+ for i := len(b); i < int(f.MinExponentDigits); i++ {
+ dst = append(dst, superDigits[0]...)
+ }
+ for _, c := range b {
+ dst = append(dst, superDigits[c-'0']...)
+ }
+ } else {
+ dst = append(dst, exponential...)
+ switch {
+ case exp < 0:
+ dst = append(dst, f.Symbol(SymMinusSign)...)
+ exp = -exp
+ case f.Flags&AlwaysExpSign != 0:
+ dst = append(dst, f.Symbol(SymPlusSign)...)
+ }
+ b = strconv.AppendUint(buf[:0], uint64(exp), 10)
+ for i := len(b); i < int(f.MinExponentDigits); i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ for _, c := range b {
+ dst = f.AppendDigit(dst, c-'0')
+ }
+ }
+ return appendAffix(dst, f, suffix, neg), savedLen, len(dst)
+}
+
+const (
+ superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS
+ superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN
+)
+
+var (
+ // Note: the digits are not sequential!!!
+ superDigits = []string{
+ "\u2070", // SUPERSCRIPT DIGIT ZERO
+ "\u00B9", // SUPERSCRIPT DIGIT ONE
+ "\u00B2", // SUPERSCRIPT DIGIT TWO
+ "\u00B3", // SUPERSCRIPT DIGIT THREE
+ "\u2074", // SUPERSCRIPT DIGIT FOUR
+ "\u2075", // SUPERSCRIPT DIGIT FIVE
+ "\u2076", // SUPERSCRIPT DIGIT SIX
+ "\u2077", // SUPERSCRIPT DIGIT SEVEN
+ "\u2078", // SUPERSCRIPT DIGIT EIGHT
+ "\u2079", // SUPERSCRIPT DIGIT NINE
+ }
+)
+
+func (f *Formatter) getAffixes(neg bool) (affix, suffix string) {
+ str := f.Affix
+ if str != "" {
+ if f.NegOffset > 0 {
+ if neg {
+ str = str[f.NegOffset:]
+ } else {
+ str = str[:f.NegOffset]
+ }
+ }
+ sufStart := 1 + str[0]
+ affix = str[1:sufStart]
+ suffix = str[sufStart+1:]
+ }
+ // TODO: introduce a NeedNeg sign to indicate if the left pattern already
+ // has a sign marked?
+ if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) {
+ affix = "-" + affix
+ }
+ return affix, suffix
+}
+
+func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) {
+ if d.NaN {
+ return fmtNaN(dst, f), true
+ }
+ if d.Inf {
+ return fmtInfinite(dst, f, d), true
+ }
+ return dst, false
+}
+
+func fmtNaN(dst []byte, f *Formatter) []byte {
+ return append(dst, f.Symbol(SymNan)...)
+}
+
+func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte {
+ affix, suffix := f.getAffixes(d.Neg)
+ dst = appendAffix(dst, f, affix, d.Neg)
+ dst = append(dst, f.Symbol(SymInfinity)...)
+ dst = appendAffix(dst, f, suffix, d.Neg)
+ return dst
+}
+
+func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte {
+ quoting := false
+ escaping := false
+ for _, r := range affix {
+ switch {
+ case escaping:
+ // escaping occurs both inside and outside of quotes
+ dst = append(dst, string(r)...)
+ escaping = false
+ case r == '\\':
+ escaping = true
+ case r == '\'':
+ quoting = !quoting
+ case quoting:
+ dst = append(dst, string(r)...)
+ case r == '%':
+ if f.DigitShift == 3 {
+ dst = append(dst, f.Symbol(SymPerMille)...)
+ } else {
+ dst = append(dst, f.Symbol(SymPercentSign)...)
+ }
+ case r == '-' || r == '+':
+ if neg {
+ dst = append(dst, f.Symbol(SymMinusSign)...)
+ } else if f.Flags&ElideSign == 0 {
+ dst = append(dst, f.Symbol(SymPlusSign)...)
+ } else {
+ dst = append(dst, ' ')
+ }
+ default:
+ dst = append(dst, string(r)...)
+ }
+ }
+ return dst
+}
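+
+// Illustrative sketch (not part of the upstream file): a Formatter is
+// typically initialized for a language and then drives the full pipeline
+// (Convert, FormatDigits, Render) through Append:
+//
+//	var f Formatter
+//	f.InitDecimal(language.MustParse("en"))
+//	out := f.Append(nil, 1234.5)
+//	// out is expected to be "1,234.5", given English grouping and symbols.
+//	_ = out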
diff --git a/vendor/golang.org/x/text/internal/number/number.go b/vendor/golang.org/x/text/internal/number/number.go
new file mode 100644
index 000000000..e1d933c3f
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/number.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go
+
+// Package number contains tools and data for formatting numbers.
+package number
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/language"
+)
+
+// Info holds number formatting configuration data.
+type Info struct {
+ system systemData // numbering system information
+ symIndex symOffset // index to symbols
+}
+
+// InfoFromLangID returns a Info for the given compact language identifier and
+// numbering system identifier. If system is the empty string, the default
+// numbering system will be taken for that language.
+func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info {
+ p := langToDefaults[compactIndex]
+ // Lookup the entry for the language.
+ pSymIndex := symOffset(0) // Default: Latin, default symbols
+ system, ok := systemMap[numberSystem]
+ if !ok {
+ // Take the value for the default numbering system. This is by far the
+ // most common case as an alternative numbering system is hardly used.
+ if p&hasNonLatnMask == 0 { // Latn digits.
+ pSymIndex = p
+ } else { // Non-Latn or multiple numbering systems.
+ // Take the first entry from the alternatives list.
+ data := langToAlt[p&^hasNonLatnMask]
+ pSymIndex = data.symIndex
+ system = data.system
+ }
+ } else {
+ langIndex := compactIndex
+ ns := system
+ outerLoop:
+ for ; ; p = langToDefaults[langIndex] {
+ if p&hasNonLatnMask == 0 {
+ if ns == 0 {
+ // The index directly points to the symbol data.
+ pSymIndex = p
+ break
+ }
+ // Move to the parent and retry.
+ langIndex = langIndex.Parent()
+ } else {
+ // The index points to a list of symbol data indexes.
+ for _, e := range langToAlt[p&^hasNonLatnMask:] {
+ if e.compactTag != langIndex {
+ if langIndex == 0 {
+ // The CLDR root defines full symbol information for
+ // all numbering systems (even though mostly by
+ // means of aliases). Fall back to the default entry
+ // for Latn if there is no data for the numbering
+ // system of this language.
+ if ns == 0 {
+ break
+ }
+ // Fall back to Latin and start from the original
+ // language. See
+ // https://unicode.org/reports/tr35/#Locale_Inheritance.
+ ns = numLatn
+ langIndex = compactIndex
+ continue outerLoop
+ }
+ // Fall back to parent.
+ langIndex = langIndex.Parent()
+ } else if e.system == ns {
+ pSymIndex = e.symIndex
+ break outerLoop
+ }
+ }
+ }
+ }
+ }
+ if int(system) >= len(numSysData) { // algorithmic
+ // Will generate ASCII digits in case the user inadvertently calls
+ // WriteDigit or Digit on it.
+ d := numSysData[0]
+ d.id = system
+ return Info{
+ system: d,
+ symIndex: pSymIndex,
+ }
+ }
+ return Info{
+ system: numSysData[system],
+ symIndex: pSymIndex,
+ }
+}
+
+// InfoFromTag returns a Info for the given language tag.
+func InfoFromTag(t language.Tag) Info {
+ return InfoFromLangID(tagToID(t), t.TypeForKey("nu"))
+}
+
+// IsDecimal reports if the numbering system can convert decimal to native
+// symbols one-to-one.
+func (n Info) IsDecimal() bool {
+ return int(n.system.id) < len(numSysData)
+}
+
+// WriteDigit writes to dst the UTF-8 sequence in n's numbering system that
+// corresponds to the given ASCII digit and reports the number of bytes
+// written. dst must be large enough to hold the rune (which can be up to
+// utf8.UTFMax bytes).
+func (n Info) WriteDigit(dst []byte, asciiDigit rune) int {
+ copy(dst, n.system.zero[:n.system.digitSize])
+ dst[n.system.digitSize-1] += byte(asciiDigit - '0')
+ return int(n.system.digitSize)
+}
+
+// AppendDigit appends to dst the UTF-8 sequence in n's numbering system that
+// corresponds to the given digit (a value in the range 0 through 9) and
+// returns the extended buffer.
+func (n Info) AppendDigit(dst []byte, digit byte) []byte {
+ dst = append(dst, n.system.zero[:n.system.digitSize]...)
+ dst[len(dst)-1] += digit
+ return dst
+}
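+
+// Given an Info n for a decimal numbering system (see IsDecimal), a sketch
+// of building the native representation of 42 digit by digit:
+//
+//    var buf []byte
+//    buf = n.AppendDigit(buf, 4)
+//    buf = n.AppendDigit(buf, 2)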
+
+// Digit returns the digit for the numbering system for the corresponding ASCII
+// value. For example, ni.Digit('3') could return '三'. Note that the argument
+// is the rune constant '3', which equals 51, not the integer constant 3.
+func (n Info) Digit(asciiDigit rune) rune {
+ var x [utf8.UTFMax]byte
+ n.WriteDigit(x[:], asciiDigit)
+ r, _ := utf8.DecodeRune(x[:])
+ return r
+}
+
+// Symbol returns the string for the given symbol type.
+func (n Info) Symbol(t SymbolType) string {
+ return symData.Elem(int(symIndex[n.symIndex][t]))
+}
+
+func formatForLang(t language.Tag, index []byte) *Pattern {
+ return &formats[index[tagToID(t)]]
+}
+
+func tagToID(t language.Tag) compact.ID {
+ id, _ := compact.RegionalID(compact.Tag(t))
+ return id
+}
diff --git a/vendor/golang.org/x/text/internal/number/pattern.go b/vendor/golang.org/x/text/internal/number/pattern.go
new file mode 100644
index 000000000..06e59559a
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/pattern.go
@@ -0,0 +1,485 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package number
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// This file contains a parser for the CLDR number patterns as described in
+// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns.
+//
+// The following BNF is derived from this standard.
+//
+// pattern := subpattern (';' subpattern)?
+// subpattern := affix? number exponent? affix?
+// number := decimal | sigDigits
+// decimal := '#'* '0'* ('.' fraction)? | '#' | '0'
+// fraction := '0'* '#'*
+// sigDigits := '#'* '@' '@'* '#'*
+// exponent := 'E' '+'? '0'* '0'
+// padSpec := '*' \L
+//
+// Notes:
+// - An affix pattern may contain any runes, but runes with special meaning
+// should be escaped.
+// - Sequences of digits, '#', and '@' in decimal and sigDigits may have
+// interstitial commas.
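+//
+// For example, "#,##0.00;(#,##0.00)" consists of a positive subpattern
+// "#,##0.00" (grouping size 3, exactly two fraction digits) and a negative
+// subpattern that wraps the number in parentheses.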
+
+// TODO: replace special characters in affixes (-, +, ¤) with control codes.
+
+// Pattern holds the information needed for formatting numbers, as extracted
+// from CLDR number patterns.
+//
+// Patterns are precompiled for all languages. Even though the number of
+// patterns is not very large, we want to keep this type small.
+//
+// This type is only intended for internal use.
+type Pattern struct {
+ RoundingContext
+
+ Affix string // includes prefix and suffix. First byte is prefix length.
+ Offset uint16 // Offset into Affix for prefix and suffix
+ NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0.
+ PadRune rune
+ FormatWidth uint16
+
+ GroupingSize [2]uint8
+ Flags PatternFlag
+}
+
+// A RoundingContext indicates how a number should be converted to digits.
+// It contains all information needed to determine the "visible digits" as
+// required by the pluralization rules.
+type RoundingContext struct {
+ // TODO: unify these two fields so that the meaning of how precision is
+ // handled is unambiguous.
+ MaxSignificantDigits int16 // -1 is unlimited
+ MaxFractionDigits int16 // -1 is unlimited
+
+ Increment uint32
+ IncrementScale uint8 // May differ from printed scale.
+
+ Mode RoundingMode
+
+ DigitShift uint8 // Number of decimals to shift. Used for % and ‰.
+
+ // Number of digits.
+ MinIntegerDigits uint8
+
+ MaxIntegerDigits uint8
+ MinFractionDigits uint8
+ MinSignificantDigits uint8
+
+ MinExponentDigits uint8
+}
+
+// RoundSignificantDigits returns the number of significant digits an
+// implementation of Convert may round to, or n < 0 if there is no maximum or
+// if a maximum is not recommended.
+func (r *RoundingContext) RoundSignificantDigits() (n int) {
+ if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 {
+ return int(r.MaxSignificantDigits)
+ } else if r.isScientific() && r.MaxIntegerDigits == 1 {
+ if r.MaxSignificantDigits == 0 ||
+ int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) {
+ // Note: don't add DigitShift: it is only used for decimals.
+ return int(r.MaxFractionDigits) + 1
+ }
+ }
+ return -1
+}
+
+// RoundFractionDigits returns the number of fraction digits an implementation
+// of Convert may round to, or n < 0 if there is no maximum or if a maximum is
+// not recommended.
+func (r *RoundingContext) RoundFractionDigits() (n int) {
+ if r.MinExponentDigits == 0 &&
+ r.MaxSignificantDigits == 0 &&
+ r.MaxFractionDigits >= 0 {
+ return int(r.MaxFractionDigits) + int(r.DigitShift)
+ }
+ return -1
+}
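+
+// As a worked example: for a plain decimal pattern such as "#,##0.00" (no
+// exponent, no significant digits), RoundSignificantDigits reports -1 and
+// RoundFractionDigits reports 2.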
+
+// SetScale fixes the RoundingContext to a fixed number of fraction digits.
+func (r *RoundingContext) SetScale(scale int) {
+ r.MinFractionDigits = uint8(scale)
+ r.MaxFractionDigits = int16(scale)
+}
+
+func (r *RoundingContext) SetPrecision(prec int) {
+ r.MaxSignificantDigits = int16(prec)
+}
+
+func (r *RoundingContext) isScientific() bool {
+ return r.MinExponentDigits > 0
+}
+
+func (f *Pattern) needsSep(pos int) bool {
+ p := pos - 1
+ size := int(f.GroupingSize[0])
+ if size == 0 || p == 0 {
+ return false
+ }
+ if p == size {
+ return true
+ }
+ if p -= size; p < 0 {
+ return false
+ }
+ // TODO: make the second grouping size the same as the first if it is 0 so
+ // that we can avoid this check.
+ if x := int(f.GroupingSize[1]); x != 0 {
+ size = x
+ }
+ return p%size == 0
+}
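+
+// As a worked example: with GroupingSize set to [2]uint8{3, 2} (Indian
+// grouping, as produced by "#,##,##0"), needsSep reports true for positions
+// 4, 6, 8, ..., so 1234567 is grouped as 12,34,567.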
+
+// A PatternFlag is a bit mask for the flag field of a Pattern.
+type PatternFlag uint8
+
+const (
+ AlwaysSign PatternFlag = 1 << iota
+ ElideSign // Use space instead of plus sign. AlwaysSign must be true.
+ AlwaysExpSign
+ AlwaysDecimalSeparator
+ ParenthesisForNegative // Common pattern. Saves space.
+
+ PadAfterNumber
+ PadAfterAffix
+
+ PadBeforePrefix = 0 // Default
+ PadAfterPrefix = PadAfterAffix
+ PadBeforeSuffix = PadAfterNumber
+ PadAfterSuffix = PadAfterNumber | PadAfterAffix
+ PadMask = PadAfterNumber | PadAfterAffix
+)
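+
+// For example, in the pattern "$*x#,##0.00" the pad specifier "*x" follows
+// the prefix "$", so parsing sets PadRune to 'x' and includes PadAfterPrefix
+// in the flags.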
+
+type parser struct {
+ *Pattern
+
+ leadingSharps int
+
+ pos int
+ err error
+ doNotTerminate bool
+ groupingCount uint
+ hasGroup bool
+ buf []byte
+}
+
+func (p *parser) setError(err error) {
+ if p.err == nil {
+ p.err = err
+ }
+}
+
+func (p *parser) updateGrouping() {
+ if p.hasGroup &&
+ 0 < p.groupingCount && p.groupingCount < 255 {
+ p.GroupingSize[1] = p.GroupingSize[0]
+ p.GroupingSize[0] = uint8(p.groupingCount)
+ }
+ p.groupingCount = 0
+ p.hasGroup = true
+}
+
+var (
+ // TODO: more sensible and localizable error messages.
+ errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers")
+ errInvalidPadSpecifier = errors.New("format: invalid pad specifier")
+ errInvalidQuote = errors.New("format: invalid quote")
+ errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes")
+ errDuplicatePercentSign = errors.New("format: duplicate percent sign")
+ errDuplicatePermilleSign = errors.New("format: duplicate permille sign")
+ errUnexpectedEnd = errors.New("format: unexpected end of pattern")
+)
+
+// ParsePattern extracts formatting information from a CLDR number pattern.
+//
+// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns.
+func ParsePattern(s string) (f *Pattern, err error) {
+ p := parser{Pattern: &Pattern{}}
+
+ s = p.parseSubPattern(s)
+
+ if s != "" {
+ // Parse negative sub pattern.
+ if s[0] != ';' {
+ p.setError(errors.New("format: error parsing first sub pattern"))
+ return nil, p.err
+ }
+ neg := parser{Pattern: &Pattern{}} // just for extracting the affixes.
+ s = neg.parseSubPattern(s[len(";"):])
+ p.NegOffset = uint16(len(p.buf))
+ p.buf = append(p.buf, neg.buf...)
+ }
+ if s != "" {
+ p.setError(errors.New("format: spurious characters at end of pattern"))
+ }
+ if p.err != nil {
+ return nil, p.err
+ }
+ if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" {
+ // No prefix or suffixes.
+ p.NegOffset = 0
+ } else {
+ p.Affix = affix
+ }
+ if p.Increment == 0 {
+ p.IncrementScale = 0
+ }
+ return p.Pattern, nil
+}
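+
+// A minimal usage sketch; the resulting field values follow from the parsing
+// rules below:
+//
+//    p, err := ParsePattern("#,##0.00")
+//    // err == nil; p.GroupingSize[0] == 3, p.MinIntegerDigits == 1,
+//    // p.MinFractionDigits == 2, p.MaxFractionDigits == 2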
+
+func (p *parser) parseSubPattern(s string) string {
+ s = p.parsePad(s, PadBeforePrefix)
+ s = p.parseAffix(s)
+ s = p.parsePad(s, PadAfterPrefix)
+
+ s = p.parse(p.number, s)
+ p.updateGrouping()
+
+ s = p.parsePad(s, PadBeforeSuffix)
+ s = p.parseAffix(s)
+ s = p.parsePad(s, PadAfterSuffix)
+ return s
+}
+
+func (p *parser) parsePad(s string, f PatternFlag) (tail string) {
+ if len(s) >= 2 && s[0] == '*' {
+ r, sz := utf8.DecodeRuneInString(s[1:])
+ if p.PadRune != 0 {
+ p.err = errMultiplePadSpecifiers
+ } else {
+ p.Flags |= f
+ p.PadRune = r
+ }
+ return s[1+sz:]
+ }
+ return s
+}
+
+func (p *parser) parseAffix(s string) string {
+ x := len(p.buf)
+ p.buf = append(p.buf, 0) // placeholder for affix length
+
+ s = p.parse(p.affix, s)
+
+ n := len(p.buf) - x - 1
+ if n > 0xFF {
+ p.setError(errAffixTooLarge)
+ }
+ p.buf[x] = uint8(n)
+ return s
+}
+
+// state implements a state transition. It returns the new state. A state
+// function may set an error on the parser or may simply return nil on a token
+// it does not handle, leaving it to the next phase to fail.
+type state func(r rune) state
+
+// parse repeatedly applies a state function on the given string until a
+// termination condition is reached.
+func (p *parser) parse(fn state, s string) (tail string) {
+ for i, r := range s {
+ p.doNotTerminate = false
+ if fn = fn(r); fn == nil || p.err != nil {
+ return s[i:]
+ }
+ p.FormatWidth++
+ }
+ if p.doNotTerminate {
+ p.setError(errUnexpectedEnd)
+ }
+ return ""
+}
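+
+// For example, parsing the number part of "0.00E0" steps through the states
+// number -> integer -> fraction -> exponent, incrementing FormatWidth for
+// each consumed rune.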
+
+func (p *parser) affix(r rune) state {
+ switch r {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ '#', '@', '.', '*', ',', ';':
+ return nil
+ case '\'':
+ p.FormatWidth--
+ return p.escapeFirst
+ case '%':
+ if p.DigitShift != 0 {
+ p.setError(errDuplicatePercentSign)
+ }
+ p.DigitShift = 2
+ case '\u2030': // ‰ Per mille
+ if p.DigitShift != 0 {
+ p.setError(errDuplicatePermilleSign)
+ }
+ p.DigitShift = 3
+ // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤
+ }
+ p.buf = append(p.buf, string(r)...)
+ return p.affix
+}
+
+func (p *parser) escapeFirst(r rune) state {
+ switch r {
+ case '\'':
+ p.buf = append(p.buf, "\\'"...)
+ return p.affix
+ default:
+ p.buf = append(p.buf, '\'')
+ p.buf = append(p.buf, string(r)...)
+ }
+ return p.escape
+}
+
+func (p *parser) escape(r rune) state {
+ switch r {
+ case '\'':
+ p.FormatWidth--
+ p.buf = append(p.buf, '\'')
+ return p.affix
+ default:
+ p.buf = append(p.buf, string(r)...)
+ }
+ return p.escape
+}
+
+// number parses a number. The BNF says the integer part should always have
+// a '0', but that does not appear to be the case according to the rest of the
+// documentation. We therefore also allow numbers consisting only of '#'s.
+func (p *parser) number(r rune) state {
+ switch r {
+ case '#':
+ p.groupingCount++
+ p.leadingSharps++
+ case '@':
+ p.groupingCount++
+ p.leadingSharps = 0
+ p.MaxFractionDigits = -1
+ return p.sigDigits(r)
+ case ',':
+ if p.leadingSharps == 0 { // no leading commas
+ return nil
+ }
+ p.updateGrouping()
+ case 'E':
+ p.MaxIntegerDigits = uint8(p.leadingSharps)
+ return p.exponent
+ case '.': // allow ".##" etc.
+ p.updateGrouping()
+ return p.fraction
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ return p.integer(r)
+ default:
+ return nil
+ }
+ return p.number
+}
+
+func (p *parser) integer(r rune) state {
+ if !('0' <= r && r <= '9') {
+ var next state
+ switch r {
+ case 'E':
+ if p.leadingSharps > 0 {
+ p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits
+ }
+ next = p.exponent
+ case '.':
+ next = p.fraction
+ case ',':
+ next = p.integer
+ }
+ p.updateGrouping()
+ return next
+ }
+ p.Increment = p.Increment*10 + uint32(r-'0')
+ p.groupingCount++
+ p.MinIntegerDigits++
+ return p.integer
+}
+
+func (p *parser) sigDigits(r rune) state {
+ switch r {
+ case '@':
+ p.groupingCount++
+ p.MaxSignificantDigits++
+ p.MinSignificantDigits++
+ case '#':
+ return p.sigDigitsFinal(r)
+ case 'E':
+ p.updateGrouping()
+ return p.normalizeSigDigitsWithExponent()
+ default:
+ p.updateGrouping()
+ return nil
+ }
+ return p.sigDigits
+}
+
+func (p *parser) sigDigitsFinal(r rune) state {
+ switch r {
+ case '#':
+ p.groupingCount++
+ p.MaxSignificantDigits++
+ case 'E':
+ p.updateGrouping()
+ return p.normalizeSigDigitsWithExponent()
+ default:
+ p.updateGrouping()
+ return nil
+ }
+ return p.sigDigitsFinal
+}
+
+func (p *parser) normalizeSigDigitsWithExponent() state {
+ p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1
+ p.MinFractionDigits = p.MinSignificantDigits - 1
+ p.MaxFractionDigits = p.MaxSignificantDigits - 1
+ p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0
+ return p.exponent
+}
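+
+// As a worked example: "@@#E0" (two to three significant digits with an
+// exponent) normalizes to MinIntegerDigits == 1, MaxIntegerDigits == 1,
+// MinFractionDigits == 1, and MaxFractionDigits == 2.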
+
+func (p *parser) fraction(r rune) state {
+ switch r {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ p.Increment = p.Increment*10 + uint32(r-'0')
+ p.IncrementScale++
+ p.MinFractionDigits++
+ p.MaxFractionDigits++
+ case '#':
+ p.MaxFractionDigits++
+ case 'E':
+ if p.leadingSharps > 0 {
+ p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits
+ }
+ return p.exponent
+ default:
+ return nil
+ }
+ return p.fraction
+}
+
+func (p *parser) exponent(r rune) state {
+ switch r {
+ case '+':
+ // Set mode and check it wasn't already set.
+ if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 {
+ break
+ }
+ p.Flags |= AlwaysExpSign
+ p.doNotTerminate = true
+ return p.exponent
+ case '0':
+ p.MinExponentDigits++
+ return p.exponent
+ }
+ // termination condition
+ if p.MinExponentDigits == 0 {
+ p.setError(errors.New("format: need at least one digit"))
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/vendor/golang.org/x/text/internal/number/roundingmode_string.go
new file mode 100644
index 000000000..bcc22471d
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/roundingmode_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type RoundingMode"; DO NOT EDIT.
+
+package number
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ToNearestEven-0]
+ _ = x[ToNearestZero-1]
+ _ = x[ToNearestAway-2]
+ _ = x[ToPositiveInf-3]
+ _ = x[ToNegativeInf-4]
+ _ = x[ToZero-5]
+ _ = x[AwayFromZero-6]
+ _ = x[numModes-7]
+}
+
+const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes"
+
+var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91}
+
+func (i RoundingMode) String() string {
+ if i >= RoundingMode(len(_RoundingMode_index)-1) {
+ return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]]
+}
diff --git a/vendor/golang.org/x/text/internal/number/tables.go b/vendor/golang.org/x/text/internal/number/tables.go
new file mode 100644
index 000000000..8efce81b5
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/tables.go
@@ -0,0 +1,1219 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package number
+
+import "golang.org/x/text/internal/stringset"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+var numSysData = []systemData{ // 59 elements
+ 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}},
+ 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}},
+ 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}},
+ 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}},
+ 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}},
+ 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}},
+ 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}},
+ 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}},
+ 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}},
+ 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}},
+ 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}},
+ 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}},
+ 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}},
+ 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}},
+ 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}},
+ 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}},
+ 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}},
+ 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}},
+ 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}},
+ 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}},
+ 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}},
+ 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}},
+ 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}},
+ 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}},
+ 24: {id: 0x18, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x80, 0x0}},
+ 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}},
+ 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}},
+ 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}},
+ 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}},
+ 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}},
+ 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}},
+ 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb5, 0xa6, 0x0}},
+ 32: {id: 0x20, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}},
+ 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}},
+ 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}},
+ 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}},
+ 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}},
+ 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}},
+ 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}},
+ 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}},
+ 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}},
+ 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}},
+ 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}},
+ 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}},
+ 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}},
+ 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}},
+ 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}},
+ 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}},
+ 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}},
+ 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}},
+ 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}},
+ 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa7, 0x90, 0x0}},
+ 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}},
+ 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}},
+ 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}},
+ 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}},
+ 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}},
+ 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}},
+ 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}},
+} // Size: 378 bytes
+
+const (
+ numAdlm = 0x1
+ numAhom = 0x2
+ numArab = 0x3
+ numArabext = 0x4
+ numArmn = 0x3b
+ numArmnlow = 0x3c
+ numBali = 0x5
+ numBeng = 0x6
+ numBhks = 0x7
+ numBrah = 0x8
+ numCakm = 0x9
+ numCham = 0xa
+ numCyrl = 0x3d
+ numDeva = 0xb
+ numEthi = 0x3e
+ numFullwide = 0xc
+ numGeor = 0x3f
+ numGonm = 0xd
+ numGrek = 0x40
+ numGreklow = 0x41
+ numGujr = 0xe
+ numGuru = 0xf
+ numHanidays = 0x42
+ numHanidec = 0x43
+ numHans = 0x44
+ numHansfin = 0x45
+ numHant = 0x46
+ numHantfin = 0x47
+ numHebr = 0x48
+ numHmng = 0x10
+ numJava = 0x11
+ numJpan = 0x49
+ numJpanfin = 0x4a
+ numKali = 0x12
+ numKhmr = 0x13
+ numKnda = 0x14
+ numLana = 0x15
+ numLanatham = 0x16
+ numLaoo = 0x17
+ numLatn = 0x0
+ numLepc = 0x18
+ numLimb = 0x19
+ numMathbold = 0x1a
+ numMathdbl = 0x1b
+ numMathmono = 0x1c
+ numMathsanb = 0x1d
+ numMathsans = 0x1e
+ numMlym = 0x1f
+ numModi = 0x20
+ numMong = 0x21
+ numMroo = 0x22
+ numMtei = 0x23
+ numMymr = 0x24
+ numMymrshan = 0x25
+ numMymrtlng = 0x26
+ numNewa = 0x27
+ numNkoo = 0x28
+ numOlck = 0x29
+ numOrya = 0x2a
+ numOsma = 0x2b
+ numRoman = 0x4b
+ numRomanlow = 0x4c
+ numSaur = 0x2c
+ numShrd = 0x2d
+ numSind = 0x2e
+ numSinh = 0x2f
+ numSora = 0x30
+ numSund = 0x31
+ numTakr = 0x32
+ numTalu = 0x33
+ numTaml = 0x4d
+ numTamldec = 0x34
+ numTelu = 0x35
+ numThai = 0x36
+ numTibt = 0x37
+ numTirh = 0x38
+ numVaii = 0x39
+ numWara = 0x3a
+ numNumberSystems
+)
+
+var systemMap = map[string]system{
+ "adlm": numAdlm,
+ "ahom": numAhom,
+ "arab": numArab,
+ "arabext": numArabext,
+ "armn": numArmn,
+ "armnlow": numArmnlow,
+ "bali": numBali,
+ "beng": numBeng,
+ "bhks": numBhks,
+ "brah": numBrah,
+ "cakm": numCakm,
+ "cham": numCham,
+ "cyrl": numCyrl,
+ "deva": numDeva,
+ "ethi": numEthi,
+ "fullwide": numFullwide,
+ "geor": numGeor,
+ "gonm": numGonm,
+ "grek": numGrek,
+ "greklow": numGreklow,
+ "gujr": numGujr,
+ "guru": numGuru,
+ "hanidays": numHanidays,
+ "hanidec": numHanidec,
+ "hans": numHans,
+ "hansfin": numHansfin,
+ "hant": numHant,
+ "hantfin": numHantfin,
+ "hebr": numHebr,
+ "hmng": numHmng,
+ "java": numJava,
+ "jpan": numJpan,
+ "jpanfin": numJpanfin,
+ "kali": numKali,
+ "khmr": numKhmr,
+ "knda": numKnda,
+ "lana": numLana,
+ "lanatham": numLanatham,
+ "laoo": numLaoo,
+ "latn": numLatn,
+ "lepc": numLepc,
+ "limb": numLimb,
+ "mathbold": numMathbold,
+ "mathdbl": numMathdbl,
+ "mathmono": numMathmono,
+ "mathsanb": numMathsanb,
+ "mathsans": numMathsans,
+ "mlym": numMlym,
+ "modi": numModi,
+ "mong": numMong,
+ "mroo": numMroo,
+ "mtei": numMtei,
+ "mymr": numMymr,
+ "mymrshan": numMymrshan,
+ "mymrtlng": numMymrtlng,
+ "newa": numNewa,
+ "nkoo": numNkoo,
+ "olck": numOlck,
+ "orya": numOrya,
+ "osma": numOsma,
+ "roman": numRoman,
+ "romanlow": numRomanlow,
+ "saur": numSaur,
+ "shrd": numShrd,
+ "sind": numSind,
+ "sinh": numSinh,
+ "sora": numSora,
+ "sund": numSund,
+ "takr": numTakr,
+ "talu": numTalu,
+ "taml": numTaml,
+ "tamldec": numTamldec,
+ "telu": numTelu,
+ "thai": numThai,
+ "tibt": numTibt,
+ "tirh": numTirh,
+ "vaii": numVaii,
+ "wara": numWara,
+}
+
+var symIndex = [][12]uint8{ // 81 elements
+ 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 4: [12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb},
+ 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb},
+ 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb},
+ 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb},
+ 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb},
+ 25: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0x1e, 0x0},
+ 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb},
+ 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb},
+ 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb},
+ 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb},
+ 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb},
+ 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb},
+ 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb},
+ 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb},
+ 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb},
+ 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb},
+ 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb},
+ 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb},
+ 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb},
+ 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb},
+ 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0},
+ 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb},
+ 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb},
+ 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb},
+ 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb},
+ 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb},
+ 55: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb},
+ 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb},
+ 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0},
+ 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb},
+ 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb},
+ 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb},
+ 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0},
+ 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb},
+ 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35},
+ 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35},
+ 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35},
+ 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35},
+ 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49},
+ 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb},
+ 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb},
+ 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb},
+} // Size: 996 bytes
+
+var symData = stringset.Set{
+ Data: "" + // Size: 599 bytes
+ ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" +
+ "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" +
+ "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" +
+ "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" +
+ "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" +
+ "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" +
+ "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད",
+ Index: []uint16{ // 79 elements
+ // Entry 0 - 3F
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020,
+ 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058,
+ 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080,
+ 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105,
+ 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173,
+ 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6,
+ 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da,
+ // Entry 40 - 7F
+ 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208,
+ 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257,
+ },
+} // Size: 797 bytes
+
+// langToDefaults maps a compact language index to the default numbering system
+// and default symbol set
+var langToDefaults = [775]symOffset{
+ // Entry 0 - 3F
+ 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003,
+ 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002,
+ 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004,
+ 0x0002, 0x0004, 0x0002, 0x0002, 0x0002, 0x0003, 0x0002, 0x0000,
+ 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000,
+ // Entry 40 - 7F
+ 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001,
+ 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007,
+ 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008,
+ 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009,
+ 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a,
+ 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001,
+ 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000,
+ // Entry 80 - BF
+ 0x000e, 0x000e, 0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000,
+ 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ // Entry C0 - FF
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000,
+ 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015,
+ 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006,
+ 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006,
+ // Entry 100 - 13F
+ 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006,
+ 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0016, 0x0016,
+ 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000,
+ 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001,
+ // Entry 140 - 17F
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000,
+ 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a,
+ 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000,
+ 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001,
+ 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000,
+ // Entry 180 - 1BF
+ 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f,
+ 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001,
+ 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006,
+ 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b,
+ 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000,
+ // Entry 1C0 - 1FF
+ 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d,
+ // Entry 200 - 23F
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000,
+ 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001,
+ 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067,
+ 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001,
+ // Entry 240 - 27F
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029,
+ 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a,
+ 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000,
+ 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d,
+ 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000,
+ // Entry 280 - 2BF
+ 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022,
+ 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006,
+ // Entry 2C0 - 2FF
+ 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001,
+ 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b,
+ 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001,
+ 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080,
+ // Entry 300 - 33F
+ 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014,
+} // Size: 1550 bytes
+
+// langToAlt is a list of numbering system and symbol set pairs, sorted and
+// marked by compact language index.
+var langToAlt = []altSymData{ // 131 elements
+ 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3},
+ 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4},
+ 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3},
+ 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0},
+ 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6},
+ 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0},
+ 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3},
+ 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4},
+ 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6},
+ 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0},
+ 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37},
+ 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0},
+ 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3},
+ 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9},
+ 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3},
+ 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0},
+ 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0},
+ 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3},
+ 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4},
+ 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5},
+ 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6},
+ 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8},
+ 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9},
+ 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa},
+ 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb},
+ 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc},
+ 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd},
+ 28: {compactTag: 0x60, symIndex: 0x0, system: 0xe},
+ 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf},
+ 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11},
+ 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12},
+ 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13},
+ 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14},
+ 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15},
+ 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16},
+ 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17},
+ 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18},
+ 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19},
+ 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f},
+ 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21},
+ 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23},
+ 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24},
+ 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25},
+ 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28},
+ 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29},
+ 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a},
+ 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b},
+ 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c},
+ 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d},
+ 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30},
+ 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31},
+ 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32},
+ 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33},
+ 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34},
+ 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35},
+ 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36},
+ 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37},
+ 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39},
+ 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43},
+ 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0},
+ 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3},
+ 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4},
+ 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37},
+ 64: {compactTag: 0x7c, symIndex: 0x0, system: 0x0},
+ 65: {compactTag: 0x114, symIndex: 0x43, system: 0x4},
+ 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0},
+ 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3},
+ 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0},
+ 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3},
+ 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4},
+ 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0},
+ 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3},
+ 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4},
+ 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0},
+ 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3},
+ 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0},
+ 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9},
+ 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa},
+ 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0},
+ 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3},
+ 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4},
+ 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0},
+ 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3},
+ 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0},
+ 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3},
+ 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4},
+ 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0},
+ 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4},
+ 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0},
+ 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb},
+ 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24},
+ 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0},
+ 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4},
+ 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0},
+ 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3},
+ 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4},
+ 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb},
+ 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0},
+ 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3},
+ 100: {compactTag: 0x20f, symIndex: 0x42, system: 0x4},
+ 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0},
+ 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4},
+ 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4},
+ 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0},
+ 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4},
+ 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0},
+ 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3},
+ 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0},
+ 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0},
+ 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3},
+ 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4},
+ 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc},
+ 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0},
+ 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3},
+ 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4},
+ 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0},
+ 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4},
+ 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4},
+ 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0},
+ 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4},
+ 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4},
+ 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0},
+ 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0},
+ 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4},
+ 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0},
+ 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3},
+ 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4},
+ 128: {compactTag: 0x2ff, symIndex: 0x36, system: 0x0},
+ 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3},
+ 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4},
+} // Size: 810 bytes
+
+var tagToDecimal = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 40 - 7F
+ 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01,
+ // Entry 80 - BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry C0 - FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 100 - 13F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 140 - 17F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05,
+ 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 180 - 1BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 1C0 - 1FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05,
+ 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 200 - 23F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01,
+ 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 240 - 27F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 280 - 2BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 2C0 - 2FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 300 - 33F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08,
+} // Size: 799 bytes
+
+var tagToScientific = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 40 - 7F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 80 - BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry C0 - FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 100 - 13F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c,
+ 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 180 - 1BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 1C0 - 1FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 200 - 23F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02,
+ 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 240 - 27F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 280 - 2BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 2C0 - 2FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 300 - 33F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09,
+} // Size: 799 bytes
+
+var tagToPercent = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 40 - 7F
+ 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03,
+ 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03,
+ 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04,
+ // Entry 80 - BF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry C0 - FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ // Entry 100 - 13F
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
+ 0x0b, 0x0b, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ // Entry 140 - 17F
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06,
+ 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 180 - 1BF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ // Entry 1C0 - 1FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 200 - 23F
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x06, 0x04,
+ 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 240 - 27F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
+ // Entry 280 - 2BF
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e,
+ // Entry 2C0 - 2FF
+ 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 300 - 33F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a,
+} // Size: 799 bytes
+
+var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x0,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 3,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x9,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x1,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x1},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x3,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x03\u00a0%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x7,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x6,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 3,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xc,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x9,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x03\u00a0%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xa,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 6,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x8,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 6,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x6,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x3},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xd,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x4},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x2,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x03%\u00a0\x00",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x7,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x1,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x1},
+ Affix: "\x01[\x01]",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x5,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x1,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x01%\x00",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x6,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0}}
+
+// Total table size 8634 bytes (8KiB); checksum: 8F23386D
diff --git a/vendor/golang.org/x/text/internal/stringset/set.go b/vendor/golang.org/x/text/internal/stringset/set.go
new file mode 100644
index 000000000..bb2fffbc7
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/stringset/set.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stringset provides a way to represent a collection of strings
+// compactly.
+package stringset
+
+import "sort"
+
+// A Set holds a collection of strings that can be looked up by an index number.
+type Set struct {
+ // These fields are exported to allow for code generation.
+
+ Data string
+ Index []uint16
+}
+
+// Elem returns the string with index i. It panics if i is out of range.
+func (s *Set) Elem(i int) string {
+ return s.Data[s.Index[i]:s.Index[i+1]]
+}
+
+// Len returns the number of strings in the set.
+func (s *Set) Len() int {
+ return len(s.Index) - 1
+}
+
+// Search returns the index of the given string or -1 if it is not in the set.
+// The Set must have been created with strings in sorted order.
+func Search(s *Set, str string) int {
+ // TODO: optimize this if it gets used a lot.
+ n := len(s.Index) - 1
+ p := sort.Search(n, func(i int) bool {
+ return s.Elem(i) >= str
+ })
+ if p == n || str != s.Elem(p) {
+ return -1
+ }
+ return p
+}
+
+// A Builder constructs Sets.
+type Builder struct {
+ set Set
+ index map[string]int
+}
+
+// NewBuilder returns a new and initialized Builder.
+func NewBuilder() *Builder {
+ return &Builder{
+ set: Set{
+ Index: []uint16{0},
+ },
+ index: map[string]int{},
+ }
+}
+
+// Set returns the set built so far.
+func (b *Builder) Set() Set {
+ return b.set
+}
+
+// Index returns the index for the given string, which must have been added
+// before.
+func (b *Builder) Index(s string) int {
+ return b.index[s]
+}
+
+// Add adds a string to the index. Strings that are added by a single Add will
+// be stored together, unless they match an existing string.
+func (b *Builder) Add(ss ...string) {
+ // First check if the string already exists.
+ for _, s := range ss {
+ if _, ok := b.index[s]; ok {
+ continue
+ }
+ b.index[s] = len(b.set.Index) - 1
+ b.set.Data += s
+ x := len(b.set.Data)
+ if x > 0xFFFF {
+ panic("Index too > 0xFFFF")
+ }
+ b.set.Index = append(b.set.Index, uint16(x))
+ }
+}
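+
+// A minimal usage sketch, as seen from a client package (illustrative only;
+// the strings are added in sorted order, which Search requires):
+//
+//	b := stringset.NewBuilder()
+//	b.Add("de", "en", "fr")
+//	set := b.Set()
+//	i := stringset.Search(&set, "en") // i == 1
+//	s := set.Elem(i)                  // s == "en"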
diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go
new file mode 100644
index 000000000..b5d348891
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/tag/tag.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag contains functionality handling tags and related data.
+package tag // import "golang.org/x/text/internal/tag"
+
+import "sort"
+
+// An Index converts tags to a compact numeric value.
+//
+// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
+// be used to store additional information about the tag.
+type Index string
+
+// Elem returns the element data at the given index.
+func (s Index) Elem(x int) string {
+ return string(s[x*4 : x*4+4])
+}
+
+// Index reports the index of the given key or -1 if it could not be found.
+// Only the first len(key) bytes from the start of the 4-byte entries will be
+// considered for the search and the first match in Index will be returned.
+func (s Index) Index(key []byte) int {
+ n := len(key)
+ // search the index of the first entry with an equal or higher value than
+ // key in s.
+ index := sort.Search(len(s)/4, func(i int) bool {
+ return cmp(s[i*4:i*4+n], key) != -1
+ })
+ i := index * 4
+ if cmp(s[i:i+len(key)], key) != 0 {
+ return -1
+ }
+ return index
+}
+
+// Next finds the next occurrence of key after index x, which must have been
+// obtained from a call to Index using the same key. It returns x+1 or -1.
+func (s Index) Next(key []byte, x int) int {
+ if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
+ return x
+ }
+ return -1
+}
+
+// cmp returns an integer comparing a and b lexicographically.
+func cmp(a Index, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ for i, c := range b[:n] {
+ switch {
+ case a[i] > c:
+ return 1
+ case a[i] < c:
+ return -1
+ }
+ }
+ switch {
+ case len(a) < len(b):
+ return -1
+ case len(a) > len(b):
+ return 1
+ }
+ return 0
+}
+
+// Compare returns an integer comparing a and b lexicographically.
+func Compare(a string, b []byte) int {
+ return cmp(Index(a), b)
+}
+
+// FixCase reformats b to the same pattern of cases as form.
+// It returns false if string b is malformed.
+func FixCase(form string, b []byte) bool {
+ if len(form) != len(b) {
+ return false
+ }
+ for i, c := range b {
+ if form[i] <= 'Z' {
+ if c >= 'a' {
+ c -= 'z' - 'Z'
+ }
+ if c < 'A' || 'Z' < c {
+ return false
+ }
+ } else {
+ if c <= 'Z' {
+ c += 'z' - 'Z'
+ }
+ if c < 'a' || 'z' < c {
+ return false
+ }
+ }
+ b[i] = c
+ }
+ return true
+}
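+
+// A minimal lookup sketch, as seen from a client package (illustrative only;
+// the 4-byte entries below are made up and sorted, as Index assumes):
+//
+//	idx := tag.Index("ande" + "ennl") // two 4-byte entries: "ande", "ennl"
+//	i := idx.Index([]byte("en"))      // i == 1: "en" matches the start of "ennl"
+//	e := idx.Elem(i)                  // e == "ennl"
+//
+//	b := []byte("lATN")
+//	ok := tag.FixCase("Latn", b) // ok == true, b is now []byte("Latn")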
diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go
new file mode 100644
index 000000000..a24fd1a4d
--- /dev/null
+++ b/vendor/golang.org/x/text/language/coverage.go
@@ -0,0 +1,187 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "fmt"
+ "sort"
+
+ "golang.org/x/text/internal/language"
+)
+
+// The Coverage interface is used to define the level of coverage of an
+// internationalization service. Note that not all types are supported by all
+// services. As lists may be generated on the fly, it is recommended that users
+// of a Coverage cache the results.
+type Coverage interface {
+ // Tags returns the list of supported tags.
+ Tags() []Tag
+
+ // BaseLanguages returns the list of supported base languages.
+ BaseLanguages() []Base
+
+ // Scripts returns the list of supported scripts.
+ Scripts() []Script
+
+ // Regions returns the list of supported regions.
+ Regions() []Region
+}
+
+var (
+ // Supported defines a Coverage that lists all supported subtags. Tags
+ // always returns nil.
+ Supported Coverage = allSubtags{}
+)
+
+// TODO:
+// - Support Variants, numbering systems.
+// - CLDR coverage levels.
+// - Set of common tags defined in this package.
+
+type allSubtags struct{}
+
+// Regions returns the list of supported regions. As all regions are in a
+// consecutive range, it simply returns a slice of numbers in increasing order.
+// The "undefined" region is not returned.
+func (s allSubtags) Regions() []Region {
+ reg := make([]Region, language.NumRegions)
+ for i := range reg {
+ reg[i] = Region{language.Region(i + 1)}
+ }
+ return reg
+}
+
+// Scripts returns the list of supported scripts. As all scripts are in a
+// consecutive range, it simply returns a slice of numbers in increasing order.
+// The "undefined" script is not returned.
+func (s allSubtags) Scripts() []Script {
+ scr := make([]Script, language.NumScripts)
+ for i := range scr {
+ scr[i] = Script{language.Script(i + 1)}
+ }
+ return scr
+}
+
+// BaseLanguages returns the list of all supported base languages. It generates
+// the list by traversing the internal structures.
+func (s allSubtags) BaseLanguages() []Base {
+ bs := language.BaseLanguages()
+ base := make([]Base, len(bs))
+ for i, b := range bs {
+ base[i] = Base{b}
+ }
+ return base
+}
+
+// Tags always returns nil.
+func (s allSubtags) Tags() []Tag {
+ return nil
+}
+
+// coverage is used by NewCoverage, which offers a convenient way to create
+// Coverage implementations for partially defined data. Very often a package
+// will only need to define a subset of slices, and coverage makes that easy.
+// Moreover, packages using NewCoverage, instead of their own implementation,
+// will not break if new slice types are added later.
+type coverage struct {
+ tags func() []Tag
+ bases func() []Base
+ scripts func() []Script
+ regions func() []Region
+}
+
+func (s *coverage) Tags() []Tag {
+ if s.tags == nil {
+ return nil
+ }
+ return s.tags()
+}
+
+// bases implements sort.Interface and is used to sort base languages.
+type bases []Base
+
+func (b bases) Len() int {
+ return len(b)
+}
+
+func (b bases) Swap(i, j int) {
+ b[i], b[j] = b[j], b[i]
+}
+
+func (b bases) Less(i, j int) bool {
+ return b[i].langID < b[j].langID
+}
+
+// BaseLanguages returns the result from calling s.bases if it is specified or
+// otherwise derives the set of supported base languages from tags.
+func (s *coverage) BaseLanguages() []Base {
+ if s.bases == nil {
+ tags := s.Tags()
+ if len(tags) == 0 {
+ return nil
+ }
+ a := make([]Base, len(tags))
+ for i, t := range tags {
+ a[i] = Base{language.Language(t.lang())}
+ }
+ sort.Sort(bases(a))
+ k := 0
+ for i := 1; i < len(a); i++ {
+ if a[k] != a[i] {
+ k++
+ a[k] = a[i]
+ }
+ }
+ return a[:k+1]
+ }
+ return s.bases()
+}
+
+func (s *coverage) Scripts() []Script {
+ if s.scripts == nil {
+ return nil
+ }
+ return s.scripts()
+}
+
+func (s *coverage) Regions() []Region {
+ if s.regions == nil {
+ return nil
+ }
+ return s.regions()
+}
+
+// NewCoverage returns a Coverage for the given lists. It is typically used by
+// packages providing internationalization services to define their level of
+// coverage. A list may be of type []T or func() []T, where T is either Tag,
+// Base, Script or Region. The returned Coverage derives the value for Bases
+// from Tags if no func or slice for []Base is specified. For other unspecified
+// types the returned Coverage will return nil for the respective methods.
+func NewCoverage(list ...interface{}) Coverage {
+ s := &coverage{}
+ for _, x := range list {
+ switch v := x.(type) {
+ case func() []Base:
+ s.bases = v
+ case func() []Script:
+ s.scripts = v
+ case func() []Region:
+ s.regions = v
+ case func() []Tag:
+ s.tags = v
+ case []Base:
+ s.bases = func() []Base { return v }
+ case []Script:
+ s.scripts = func() []Script { return v }
+ case []Region:
+ s.regions = func() []Region { return v }
+ case []Tag:
+ s.tags = func() []Tag { return v }
+ default:
+ panic(fmt.Sprintf("language: unsupported set type %T", v))
+ }
+ }
+ return s
+}
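+
+// A minimal usage sketch, as seen from a client package (illustrative only;
+// the derived values follow the behavior documented above):
+//
+//	c := language.NewCoverage([]language.Tag{language.English, language.French})
+//	bases := c.BaseLanguages() // derived from the tags: en and fr
+//	scripts := c.Scripts()     // nil: no script list or func was specified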
diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go
new file mode 100644
index 000000000..212b77c90
--- /dev/null
+++ b/vendor/golang.org/x/text/language/doc.go
@@ -0,0 +1,98 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package language implements BCP 47 language tags and related functionality.
+//
+// The most important function of package language is to match a list of
+// user-preferred languages to a list of supported languages.
+// It relieves the developer of having to deal with the complexity of this process
+// and provides the user with the best experience
+// (see https://blog.golang.org/matchlang).
+//
+// # Matching preferred against supported languages
+//
+// A Matcher for an application that supports English, Australian English,
+// Danish, and standard Mandarin can be created as follows:
+//
+// var matcher = language.NewMatcher([]language.Tag{
+// language.English, // The first language is used as fallback.
+// language.MustParse("en-AU"),
+// language.Danish,
+// language.Chinese,
+// })
+//
+// This list of supported languages is typically implied by the languages for
+// which there exists translations of the user interface.
+//
+// User-preferred languages usually come as a comma-separated list of BCP 47
+// language tags.
+// MatchStrings finds the best match for such strings:
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// lang, _ := r.Cookie("lang")
+// accept := r.Header.Get("Accept-Language")
+// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
+//
+// // tag should now be used for the initialization of any
+// // locale-specific service.
+// }
+//
+// The Matcher's Match method can be used to match Tags directly.
+//
+// Matchers are aware of the intricacies of equivalence between languages, such
+// as deprecated subtags, legacy tags, macro languages, mutual
+// intelligibility between scripts and languages, and transparently passing
+// BCP 47 user configuration.
+// For instance, it will know that a reader of Danish can read Norwegian Bokmål
+// and will know that Cantonese ("yue") is a good match for "zh-HK".
+//
+// # Using match results
+//
+// To guarantee a consistent user experience to the user it is important to
+// use the same language tag for the selection of any locale-specific services.
+// For example, it is utterly confusing to substitute spelled-out numbers
+// or dates in one language in text of another language.
+// More subtly confusing is using the wrong sorting order or casing
+// algorithm for a certain language.
+//
+// All the packages in x/text that provide locale-specific services
+// (e.g. collate, cases) should be initialized with the tag that was
+// obtained at the start of an interaction with the user.
+//
+// Note that the Tag returned by Match and MatchStrings may differ from any
+// of the supported languages, as it may contain carried-over settings from
+// the user tags.
+// This may be inconvenient when your application has some additional
+// locale-specific data for your supported languages.
+// Match and MatchString both return the index of the matched supported tag
+// to simplify associating such data with the matched tag.
+//
+// # Canonicalization
+//
+// If one uses the Matcher to compare languages one does not need to
+// worry about canonicalization.
+//
+// The meaning of a Tag varies per application. The language package
+// therefore delays canonicalization and preserves information as much
+// as possible. The Matcher, however, will always take into account that
+// two different tags may represent the same language.
+//
+// By default, only legacy and deprecated tags are converted into their
+// canonical equivalent. All other information is preserved. This approach makes
+// the confidence scores more accurate and allows matchers to distinguish
+// between variants that are otherwise lost.
+//
+// As a consequence, two tags that should be treated as identical according to
+// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
+// Matcher handles such distinctions, though, and is aware of the
+// equivalence relations. The CanonType type can be used to alter the
+// canonicalization form.
+//
+// # References
+//
+// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
+package language // import "golang.org/x/text/language"
+
+// TODO: explanation on how to match languages for your own locale-specific
+// service.
diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go
new file mode 100644
index 000000000..4d9c66121
--- /dev/null
+++ b/vendor/golang.org/x/text/language/language.go
@@ -0,0 +1,605 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go -output tables.go
+
+package language
+
+// TODO: Remove above NOTE after:
+// - verifying that tables are dropped correctly (most notably matcher tables).
+
+import (
+ "strings"
+
+ "golang.org/x/text/internal/language"
+ "golang.org/x/text/internal/language/compact"
+)
+
+// Tag represents a BCP 47 language tag. It is used to specify an instance of a
+// specific language or locale. All language tag values are guaranteed to be
+// well-formed.
+type Tag compact.Tag
+
+func makeTag(t language.Tag) (tag Tag) {
+ return Tag(compact.Make(t))
+}
+
+func (t *Tag) tag() language.Tag {
+ return (*compact.Tag)(t).Tag()
+}
+
+func (t *Tag) isCompact() bool {
+ return (*compact.Tag)(t).IsCompact()
+}
+
+// TODO: improve performance.
+func (t *Tag) lang() language.Language { return t.tag().LangID }
+func (t *Tag) region() language.Region { return t.tag().RegionID }
+func (t *Tag) script() language.Script { return t.tag().ScriptID }
+
+// Make is a convenience wrapper for Parse that omits the error.
+// In case of an error, a sensible default is returned.
+func Make(s string) Tag {
+ return Default.Make(s)
+}
+
+// Make is a convenience wrapper for c.Parse that omits the error.
+// In case of an error, a sensible default is returned.
+func (c CanonType) Make(s string) Tag {
+ t, _ := c.Parse(s)
+ return t
+}
+
+// Raw returns the raw base language, script and region, without making an
+// attempt to infer their values.
+func (t Tag) Raw() (b Base, s Script, r Region) {
+ tt := t.tag()
+ return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID}
+}
+
+// IsRoot returns true if t is equal to language "und".
+func (t Tag) IsRoot() bool {
+ return compact.Tag(t).IsRoot()
+}
+
+// CanonType can be used to enable or disable various types of canonicalization.
+type CanonType int
+
+const (
+ // Replace deprecated base languages with their preferred replacements.
+ DeprecatedBase CanonType = 1 << iota
+ // Replace deprecated scripts with their preferred replacements.
+ DeprecatedScript
+ // Replace deprecated regions with their preferred replacements.
+ DeprecatedRegion
+ // Remove redundant scripts.
+ SuppressScript
+ // Normalize legacy encodings. This includes legacy languages defined in
+ // CLDR as well as bibliographic codes defined in ISO-639.
+ Legacy
+ // Map the dominant language of a macro language group to the macro language
+ // subtag. For example cmn -> zh.
+ Macro
+ // The CLDR flag should be used if full compatibility with CLDR is required.
+ // There are a few cases where language.Tag may differ from CLDR. To follow all
+ // of CLDR's suggestions, use All|CLDR.
+ CLDR
+
+ // Raw can be used to Compose or Parse without Canonicalization.
+ Raw CanonType = 0
+
+ // Replace all deprecated tags with their preferred replacements.
+ Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion
+
+ // All canonicalizations recommended by BCP 47.
+ BCP47 = Deprecated | SuppressScript
+
+ // All canonicalizations.
+ All = BCP47 | Legacy | Macro
+
+ // Default is the canonicalization used by Parse, Make and Compose. To
+ // preserve as much information as possible, canonicalizations that remove
+ // potentially valuable information are not included. The Matcher is
+ // designed to recognize similar tags that would be the same if
+ // they were canonicalized using All.
+ Default = Deprecated | Legacy
+
+ canonLang = DeprecatedBase | Legacy | Macro
+
+ // TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
+)
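+
+// A canonicalization sketch, as seen from a client package (illustrative
+// only; the cmn -> zh mapping follows the Macro documentation above):
+//
+//	t := language.Raw.Make("cmn")         // kept as-is: Raw does not canonicalize
+//	t, _ = language.Macro.Canonicalize(t) // maps the macro variant: cmn -> zh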
+
+// canonicalize returns the canonicalized equivalent of the tag and
+// whether there was any change.
+func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) {
+ if c == Raw {
+ return t, false
+ }
+ changed := false
+ if c&SuppressScript != 0 {
+ if t.LangID.SuppressScript() == t.ScriptID {
+ t.ScriptID = 0
+ changed = true
+ }
+ }
+ if c&canonLang != 0 {
+ for {
+ if l, aliasType := t.LangID.Canonicalize(); l != t.LangID {
+ switch aliasType {
+ case language.Legacy:
+ if c&Legacy != 0 {
+ if t.LangID == _sh && t.ScriptID == 0 {
+ t.ScriptID = _Latn
+ }
+ t.LangID = l
+ changed = true
+ }
+ case language.Macro:
+ if c&Macro != 0 {
+ // We deviate here from CLDR. The mapping "nb" -> "no"
+ // qualifies as a typical Macro language mapping. However,
+ // for legacy reasons, CLDR maps "no", the macro language
+ // code for Norwegian, to the dominant variant "nb". This
+ // change is currently under consideration for CLDR as well.
+ // See https://unicode.org/cldr/trac/ticket/2698 and also
+ // https://unicode.org/cldr/trac/ticket/1790 for some of the
+ // practical implications. TODO: this check could be removed
+ // if CLDR adopts this change.
+ if c&CLDR == 0 || t.LangID != _nb {
+ changed = true
+ t.LangID = l
+ }
+ }
+ case language.Deprecated:
+ if c&DeprecatedBase != 0 {
+ if t.LangID == _mo && t.RegionID == 0 {
+ t.RegionID = _MD
+ }
+ t.LangID = l
+ changed = true
+ // Other canonicalization types may still apply.
+ continue
+ }
+ }
+ } else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 {
+ t.LangID = _nb
+ changed = true
+ }
+ break
+ }
+ }
+ if c&DeprecatedScript != 0 {
+ if t.ScriptID == _Qaai {
+ changed = true
+ t.ScriptID = _Zinh
+ }
+ }
+ if c&DeprecatedRegion != 0 {
+ if r := t.RegionID.Canonicalize(); r != t.RegionID {
+ changed = true
+ t.RegionID = r
+ }
+ }
+ return t, changed
+}
+
+// Canonicalize returns the canonicalized equivalent of the tag.
+func (c CanonType) Canonicalize(t Tag) (Tag, error) {
+ // First try fast path.
+ if t.isCompact() {
+ if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed {
+ return t, nil
+ }
+ }
+ // It is unlikely that one will canonicalize a tag after matching, so we
+ // take a slow but simple approach here.
+ if tag, changed := canonicalize(c, t.tag()); changed {
+ tag.RemakeString()
+ return makeTag(tag), nil
+ }
+ return t, nil
+}
+
+// Confidence indicates the level of certainty for a given return value.
+// For example, Serbian may be written in Cyrillic or Latin script.
+// The confidence level indicates whether a value was explicitly specified,
+// whether it is typically the only possible value, or whether there is
+// an ambiguity.
+type Confidence int
+
+const (
+ No Confidence = iota // full confidence that there was no match
+ Low // most likely value picked out of a set of alternatives
+ High // value is generally assumed to be the correct match
+ Exact // exact match or explicitly specified value
+)
+
+var confName = []string{"No", "Low", "High", "Exact"}
+
+func (c Confidence) String() string {
+ return confName[c]
+}
+
+// String returns the canonical string representation of the language tag.
+func (t Tag) String() string {
+ return t.tag().String()
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (t Tag) MarshalText() (text []byte, err error) {
+ return t.tag().MarshalText()
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (t *Tag) UnmarshalText(text []byte) error {
+ var tag language.Tag
+ err := tag.UnmarshalText(text)
+ *t = makeTag(tag)
+ return err
+}
+
+// Base returns the base language of the language tag. If the base language is
+// unspecified, an attempt will be made to infer it from the context.
+// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
+func (t Tag) Base() (Base, Confidence) {
+ if b := t.lang(); b != 0 {
+ return Base{b}, Exact
+ }
+ tt := t.tag()
+ c := High
+ if tt.ScriptID == 0 && !tt.RegionID.IsCountry() {
+ c = Low
+ }
+ if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 {
+ return Base{tag.LangID}, c
+ }
+ return Base{0}, No
+}
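+
+// For illustration (assuming typical likely-subtags data, this yields
+// English):
+//
+//	b, conf := language.Make("und-US").Base() // b is likely "en", conf == High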
+
+// Script infers the script for the language tag. If it was not explicitly given, it will infer
+// a most likely candidate.
+// If more than one script is commonly used for a language, the most likely one
+// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
+// for Serbian.
+// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined)
+// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
+// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
+// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
+// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
+// Note that an inferred script is never guaranteed to be the correct one. Latin is
+// almost exclusively used for Afrikaans, but Arabic has been used for some texts
+// in the past. Also, the script that is commonly used may change over time.
+// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
+func (t Tag) Script() (Script, Confidence) {
+ if scr := t.script(); scr != 0 {
+ return Script{scr}, Exact
+ }
+ tt := t.tag()
+ sc, c := language.Script(_Zzzz), No
+ if scr := tt.LangID.SuppressScript(); scr != 0 {
+ // Note: it is not always the case that a language with a suppress
+ // script value is only written in one script (e.g. kk, ms, pa).
+ if tt.RegionID == 0 {
+ return Script{scr}, High
+ }
+ sc, c = scr, High
+ }
+ if tag, err := tt.Maximize(); err == nil {
+ if tag.ScriptID != sc {
+ sc, c = tag.ScriptID, Low
+ }
+ } else {
+ tt, _ = canonicalize(Deprecated|Macro, tt)
+ if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc {
+ sc, c = tag.ScriptID, Low
+ }
+ }
+ return Script{sc}, c
+}
+
+// Region returns the region for the language tag. If it was not explicitly given, it will
+// infer a most likely candidate from the context.
+// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
+func (t Tag) Region() (Region, Confidence) {
+ if r := t.region(); r != 0 {
+ return Region{r}, Exact
+ }
+ tt := t.tag()
+ if tt, err := tt.Maximize(); err == nil {
+ return Region{tt.RegionID}, Low // TODO: differentiate between high and low.
+ }
+ tt, _ = canonicalize(Deprecated|Macro, tt)
+ if tag, err := tt.Maximize(); err == nil {
+ return Region{tag.RegionID}, Low
+ }
+ return Region{_ZZ}, No // TODO: return world instead of undetermined?
+}
+
+// Variants returns the variants specified explicitly for this language tag,
+// or nil if no variant was specified.
+func (t Tag) Variants() []Variant {
+ if !compact.Tag(t).MayHaveVariants() {
+ return nil
+ }
+ v := []Variant{}
+ x, str := "", t.tag().Variants()
+ for str != "" {
+ x, str = nextToken(str)
+ v = append(v, Variant{x})
+ }
+ return v
+}
+
+// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
+// specific language are substituted with fields from the parent language.
+// The parent for a language may change for newer versions of CLDR.
+//
+// Parent returns a tag for a less specific language that is mutually
+// intelligible or Und if there is no such language. This may not be the same as
+// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW"
+// is "zh-Hant", and the parent of "zh-Hant" is "und".
+func (t Tag) Parent() Tag {
+ return Tag(compact.Tag(t).Parent())
+}
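+
+// For illustration, per the CLDR parent relation described above:
+//
+//	p := language.Make("zh-TW").Parent()  // zh-Hant
+//	p = language.Make("zh-Hant").Parent() // und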
+
+// nextToken returns token t and the rest of the string.
+func nextToken(s string) (t, tail string) {
+ p := strings.Index(s[1:], "-")
+ if p == -1 {
+ return s[1:], ""
+ }
+ p++
+ return s[1:p], s[p:]
+}
+
+// Extension is a single BCP 47 extension.
+type Extension struct {
+ s string
+}
+
+// String returns the string representation of the extension, including the
+// type tag.
+func (e Extension) String() string {
+ return e.s
+}
+
+// ParseExtension parses s as an extension and returns it on success.
+func ParseExtension(s string) (e Extension, err error) {
+ ext, err := language.ParseExtension(s)
+ return Extension{ext}, err
+}
+
+// Type returns the one-byte extension type of e. It returns 0 for the zero
+// Extension value.
+func (e Extension) Type() byte {
+ if e.s == "" {
+ return 0
+ }
+ return e.s[0]
+}
+
+// Tokens returns the list of tokens of e.
+func (e Extension) Tokens() []string {
+ return strings.Split(e.s, "-")
+}
+
+// Extension returns the extension of type x for tag t. It will return
+// false for ok if t does not have the requested extension. The returned
+// extension will be invalid in this case.
+func (t Tag) Extension(x byte) (ext Extension, ok bool) {
+ if !compact.Tag(t).MayHaveExtensions() {
+ return Extension{}, false
+ }
+ e, ok := t.tag().Extension(x)
+ return Extension{e}, ok
+}
+
+// Extensions returns all extensions of t.
+func (t Tag) Extensions() []Extension {
+ if !compact.Tag(t).MayHaveExtensions() {
+ return nil
+ }
+ e := []Extension{}
+ for _, ext := range t.tag().Extensions() {
+ e = append(e, Extension{ext})
+ }
+ return e
+}
+
+// TypeForKey returns the type associated with the given key, where key and type
+// are of the allowed values defined for the Unicode locale extension ('u') in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// TypeForKey will traverse the inheritance chain to get the correct value.
+//
+// If there are multiple types associated with a key, only the first will be
+// returned. If there is no type associated with a key, it returns the empty
+// string.
+func (t Tag) TypeForKey(key string) string {
+ if !compact.Tag(t).MayHaveExtensions() {
+ if key != "rg" && key != "va" {
+ return ""
+ }
+ }
+ return t.tag().TypeForKey(key)
+}
+
+// SetTypeForKey returns a new Tag with the key set to type, where key and type
+// are of the allowed values defined for the Unicode locale extension ('u') in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// An empty value removes an existing pair with the same key.
+func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
+ tt, err := t.tag().SetTypeForKey(key, value)
+ return makeTag(tt), err
+}
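+
+// A sketch of key/type handling (illustrative only; "co"/"phonebk" is the
+// standard Unicode collation example for the 'u' extension):
+//
+//	t := language.Make("en-u-co-phonebk")
+//	v := t.TypeForKey("co")          // v == "phonebk"
+//	t, _ = t.SetTypeForKey("co", "") // removes the co-phonebk pair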
+
+// NumCompactTags is the number of compact tags. The maximum compact index is
+// NumCompactTags-1.
+const NumCompactTags = compact.NumCompactTags
+
+// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
+// for which data exists in the text repository. The index will change over time
+// and should not be stored in persistent storage. If t does not match a compact
+// index, exact will be false and the compact index will be returned for the
+// first match after repeatedly taking the Parent of t.
+func CompactIndex(t Tag) (index int, exact bool) {
+ id, exact := compact.LanguageID(compact.Tag(t))
+ return int(id), exact
+}
+
+var root = language.Tag{}
+
+// Base is an ISO 639 language code, used for encoding the base language
+// of a language tag.
+type Base struct {
+ langID language.Language
+}
+
+// ParseBase parses a 2- or 3-letter ISO 639 code.
+// It returns a ValueError if s is a well-formed but unknown language identifier
+// or another error if another error occurred.
+func ParseBase(s string) (Base, error) {
+ l, err := language.ParseBase(s)
+ return Base{l}, err
+}
+
+// String returns the BCP 47 representation of the base language.
+func (b Base) String() string {
+ return b.langID.String()
+}
+
+// ISO3 returns the ISO 639-3 language code.
+func (b Base) ISO3() string {
+ return b.langID.ISO3()
+}
+
+// IsPrivateUse reports whether this language code is reserved for private use.
+func (b Base) IsPrivateUse() bool {
+ return b.langID.IsPrivateUse()
+}
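+
+// For illustration ("eng" is the ISO 639-3 code for English):
+//
+//	b, _ := language.ParseBase("en")
+//	iso3 := b.ISO3() // iso3 == "eng"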
+
+// Script is a 4-letter ISO 15924 code for representing scripts.
+// It is idiomatically represented in title case.
+type Script struct {
+ scriptID language.Script
+}
+
+// ParseScript parses a 4-letter ISO 15924 code.
+// It returns a ValueError if s is a well-formed but unknown script identifier
+// or another error if another error occurred.
+func ParseScript(s string) (Script, error) {
+ sc, err := language.ParseScript(s)
+ return Script{sc}, err
+}
+
+// String returns the script code in title case.
+// It returns "Zzzz" for an unspecified script.
+func (s Script) String() string {
+ return s.scriptID.String()
+}
+
+// IsPrivateUse reports whether this script code is reserved for private use.
+func (s Script) IsPrivateUse() bool {
+ return s.scriptID.IsPrivateUse()
+}
+
+// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
+type Region struct {
+ regionID language.Region
+}
+
+// EncodeM49 returns the Region for the given UN M.49 code.
+// It returns an error if r is not a valid code.
+func EncodeM49(r int) (Region, error) {
+ rid, err := language.EncodeM49(r)
+ return Region{rid}, err
+}
+
+// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
+// It returns a ValueError if s is a well-formed but unknown region identifier
+// or another error if another error occurred.
+func ParseRegion(s string) (Region, error) {
+ r, err := language.ParseRegion(s)
+ return Region{r}, err
+}
+
+// String returns the BCP 47 representation for the region.
+// It returns "ZZ" for an unspecified region.
+func (r Region) String() string {
+ return r.regionID.String()
+}
+
+// ISO3 returns the 3-letter ISO code of r.
+// Note that not all regions have a 3-letter ISO code.
+// In such cases this method returns "ZZZ".
+func (r Region) ISO3() string {
+ return r.regionID.ISO3()
+}
+
+// M49 returns the UN M.49 encoding of r, or 0 if this encoding
+// is not defined for r.
+func (r Region) M49() int {
+ return r.regionID.M49()
+}
+
+// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
+// may include private-use tags that are assigned by CLDR and used in this
+// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
+func (r Region) IsPrivateUse() bool {
+ return r.regionID.IsPrivateUse()
+}
+
+// IsCountry returns whether this region is a country or autonomous area. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsCountry() bool {
+ return r.regionID.IsCountry()
+}
+
+// IsGroup returns whether this region defines a collection of regions. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsGroup() bool {
+ return r.regionID.IsGroup()
+}
+
+// Contains returns whether Region c is contained by Region r. It returns true
+// if c == r.
+func (r Region) Contains(c Region) bool {
+ return r.regionID.Contains(c.regionID)
+}
+
+// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
+// In all other cases it returns either the region itself or an error.
+//
+// This method may return an error for a region for which there exists a
+// canonical form with a ccTLD. To get that ccTLD, canonicalize r first. The
+// region will already be canonicalized if it was obtained from a Tag that was
+// obtained using any of the default methods.
+func (r Region) TLD() (Region, error) {
+ tld, err := r.regionID.TLD()
+ return Region{tld}, err
+}
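+
+// For illustration, per the GB -> UK special case documented above:
+//
+//	gb, _ := language.ParseRegion("GB")
+//	tld, _ := gb.TLD() // tld.String() == "UK"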
+
+// Canonicalize returns the region or a possible replacement if the region is
+// deprecated. It will not return a replacement for deprecated regions that
+// are split into multiple regions.
+func (r Region) Canonicalize() Region {
+ return Region{r.regionID.Canonicalize()}
+}
+
+// Variant represents a registered variant of a language as defined by BCP 47.
+type Variant struct {
+ variant string
+}
+
+// ParseVariant parses and returns a Variant. An error is returned if s is not
+// a valid variant.
+func ParseVariant(s string) (Variant, error) {
+ v, err := language.ParseVariant(s)
+ return Variant{v.String()}, err
+}
+
+// String returns the string representation of the variant.
+func (v Variant) String() string {
+ return v.variant
+}
diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go
new file mode 100644
index 000000000..1153baf29
--- /dev/null
+++ b/vendor/golang.org/x/text/language/match.go
@@ -0,0 +1,735 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "errors"
+ "strings"
+
+ "golang.org/x/text/internal/language"
+)
+
+// A MatchOption configures a Matcher.
+type MatchOption func(*matcher)
+
+// PreferSameScript will, in the absence of a match, cause the first preferred
+// tag with the same script as a supported tag to match that supported tag.
+// The default is currently true, but this may change in the future.
+func PreferSameScript(preferSame bool) MatchOption {
+ return func(m *matcher) { m.preferSameScript = preferSame }
+}
+
+// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
+// There doesn't seem to be too much need for multiple types.
+// Making it a concrete type allows MatchStrings to be a method, which will
+// improve its discoverability.
+
+// MatchStrings parses and matches the given strings until one of them matches
+// the language in the Matcher. A string may be an Accept-Language header as
+// handled by ParseAcceptLanguage. The default language is returned if no
+// other language matched.
+func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
+ for _, accept := range lang {
+ desired, _, err := ParseAcceptLanguage(accept)
+ if err != nil {
+ continue
+ }
+ if tag, index, conf := m.Match(desired...); conf != No {
+ return tag, index
+ }
+ }
+ tag, index, _ = m.Match()
+ return
+}
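+
+// A matching sketch (illustrative only; assumes the usual Accept-Language
+// semantics and typical matcher data):
+//
+//	m := language.NewMatcher([]language.Tag{language.English, language.Danish})
+//	tag, index := language.MatchStrings(m, "da, en-GB;q=0.8")
+//	// index == 1 (Danish); tag may carry settings from the desired tags.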
+
+// Matcher is the interface that wraps the Match method.
+//
+// Match returns the best match for any of the given tags, along with
+// a unique index associated with the returned tag and a confidence
+// score.
+type Matcher interface {
+ Match(t ...Tag) (tag Tag, index int, c Confidence)
+}
+
+// Comprehends reports the confidence score for how well a speaker of a given
+// language can comprehend the written form of an alternative language.
+func Comprehends(speaker, alternative Tag) Confidence {
+ _, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
+ return c
+}
+
+// NewMatcher returns a Matcher that matches an ordered list of preferred tags
+// against a list of supported tags based on written intelligibility, closeness
+// of dialect, equivalence of subtags and various other rules. It is initialized
+// with the list of supported tags. The first element is used as the default
+// value in case no match is found.
+//
+// Its Match method matches the first of the given Tags to reach a certain
+// confidence threshold. The tags passed to Match should therefore be specified
+// in order of preference. Extensions are ignored for matching.
+//
+// The index returned by the Match method corresponds to the index of the
+// matched tag in t, but is augmented with the Unicode extension ('u') of the
+// corresponding preferred tag. This allows user locale options to be passed
+// transparently.
+func NewMatcher(t []Tag, options ...MatchOption) Matcher {
+ return newMatcher(t, options)
+}
+
+func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
+ var tt language.Tag
+ match, w, c := m.getBest(want...)
+ if match != nil {
+ tt, index = match.tag, match.index
+ } else {
+ // TODO: this should be an option
+ tt = m.default_.tag
+ if m.preferSameScript {
+ outer:
+ for _, w := range want {
+ script, _ := w.Script()
+ if script.scriptID == 0 {
+ // Don't do anything if there is no script, such as with
+ // private subtags.
+ continue
+ }
+ for i, h := range m.supported {
+ if script.scriptID == h.maxScript {
+ tt, index = h.tag, i
+ break outer
+ }
+ }
+ }
+ }
+ // TODO: select first language tag based on script.
+ }
+ if w.RegionID != tt.RegionID && w.RegionID != 0 {
+ if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) {
+ tt.RegionID = w.RegionID
+ tt.RemakeString()
+ } else if r := w.RegionID.String(); len(r) == 2 {
+ // TODO: also filter macro and deprecated.
+ tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz")
+ }
+ }
+ // Copy options from the user-provided tag into the result tag. This is hard
+ // to do after the fact, so we do it here.
+ // TODO: add in alternative variants to -u-va-.
+ // TODO: add preferred region to -u-rg-.
+ if e := w.Extensions(); len(e) > 0 {
+ b := language.Builder{}
+ b.SetTag(tt)
+ for _, e := range e {
+ b.AddExt(e)
+ }
+ tt = b.Make()
+ }
+ return makeTag(tt), index, c
+}
+
+// ErrMissingLikelyTagsData indicates no information was available
+// to compute likely values of missing tags.
+var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
+
+// func (t *Tag) setTagsFrom(id Tag) {
+// t.LangID = id.LangID
+// t.ScriptID = id.ScriptID
+// t.RegionID = id.RegionID
+// }
+
+// Tag Matching
+// CLDR defines an algorithm for finding the best match between two sets of language
+// tags. The basic algorithm defines how to score a possible match and then find
+// the match with the best score
+// (see https://www.unicode.org/reports/tr35/#LanguageMatching).
+// Using scoring has several disadvantages. The scoring obfuscates the importance of
+// the various factors considered, making the algorithm harder to understand. Using
+// scoring also requires the full score to be computed for each pair of tags.
+//
+// We will use a different algorithm which aims to have the following properties:
+// - clarity on the precedence of the various selection factors, and
+// - improved performance by allowing early termination of a comparison.
+//
+// Matching algorithm (overview)
+// Input:
+// - supported: a set of supported tags
+// - default: the default tag to return in case there is no match
+// - desired: list of desired tags, ordered by preference, starting with
+// the most-preferred.
+//
+// Algorithm:
+// 1) Set the best match to the lowest confidence level
+// 2) For each tag in "desired":
+// a) For each tag in "supported":
+// 1) compute the match between the two tags.
+// 2) if the match is better than the previous best match, replace it
+// with the new match. (see next section)
+// b) if the current best match is Exact and pin is true the result will be
+// frozen to the language found thusfar, although better matches may
+// still be found for the same language.
+// 3) If the best match so far is below a certain threshold, return "default".
+//
+// Ranking:
+// We use two phases to determine whether one pair of tags are a better match
+// than another pair of tags. First, we determine a rough confidence level. If the
+// levels are different, the one with the highest confidence wins.
+// Second, if the rough confidence levels are identical, we use a set of tie-breaker
+// rules.
+//
+// The confidence level of matching a pair of tags is determined by finding the
+// lowest confidence level of any matches of the corresponding subtags (the
+// result is deemed as good as its weakest link).
+// We define the following levels:
+// Exact - An exact match of a subtag, before adding likely subtags.
+// MaxExact - An exact match of a subtag, after adding likely subtags.
+// [See Note 2].
+// High - High level of mutual intelligibility between different subtag
+// variants.
+// Low - Low level of mutual intelligibility between different subtag
+// variants.
+// No - No mutual intelligibility.
+//
+// The following levels can occur for each type of subtag:
+// Base: Exact, MaxExact, High, Low, No
+// Script: Exact, MaxExact [see Note 3], Low, No
+// Region: Exact, MaxExact, High
+// Variant: Exact, High
+// Private: Exact, No
+//
+// Any result with a confidence level of Low or higher is deemed a possible match.
+// Once a desired tag matches any of the supported tags with a level of MaxExact
+// or higher, the next desired tag is not considered (see Step 2.b).
+// Note that CLDR provides languageMatching data that defines close equivalence
+// classes for base languages, scripts and regions.
+//
+// Tie-breaking
+// If we get the same confidence level for two matches, we apply a sequence of
+// tie-breaking rules. The first that succeeds defines the result. The rules are
+// applied in the following order.
+// 1) Original language was defined and was identical.
+// 2) Original region was defined and was identical.
+// 3) Distance between two maximized regions was the smallest.
+// 4) Original script was defined and was identical.
+// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
+// If there is still no winner after these rules are applied, the first match
+// found wins.
+//
+// Notes:
+// [2] In practice, as matching of Exact is done in a separate phase from
+// matching the other levels, we reuse the Exact level to mean MaxExact in
+// the second phase. As a consequence, we only need the levels defined by
+// the Confidence type. The MaxExact confidence level is mapped to High in
+// the public API.
+// [3] We do not differentiate between maximized script values that were derived
+// from suppressScript versus most likely tag data. We determined that in
+// ranking the two, one ranks just after the other. Moreover, the two cannot
+// occur concurrently. As a consequence, they are identical for practical
+// purposes.
+// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
+// the MaxExact level to allow iw vs he to still be a closer match than
+// en-AU vs en-US, for example.
+// [5] In CLDR a locale inherits fields that are unspecified for this locale
+// from its parent. Therefore, if a locale is a parent of another locale,
+// it is a strong measure for closeness, especially when no other tie
+// breaker rule applies. One could also argue it is inconsistent, for
+// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
+// though its parent is pt-PT according to the inheritance rules.
+//
+// Implementation Details:
+// There are several performance considerations worth pointing out. Most notably,
+// we preprocess as much as possible (within reason) at the time of creation of a
+// matcher. This includes:
+// - creating a per-language map, which includes data for the raw base language
+// and its canonicalized variant (if applicable),
+// - expanding entries for the equivalence classes defined in CLDR's
+// languageMatch data.
+// The per-language map ensures that typically only a very small number of tags
+// need to be considered. The pre-expansion of canonicalized subtags and
+// equivalence classes reduces the amount of map lookups that need to be done at
+// runtime.
+
+// matcher keeps a set of supported language tags, indexed by language.
+type matcher struct {
+ default_ *haveTag
+ supported []*haveTag
+ index map[language.Language]*matchHeader
+ passSettings bool
+ preferSameScript bool
+}
+
+// matchHeader has the lists of tags for exact matches and matches based on
+// maximized and canonicalized tags for a given language.
+type matchHeader struct {
+ haveTags []*haveTag
+ original bool
+}
+
+// haveTag holds a supported Tag and its maximized script and region. The maximized
+// or canonicalized language is not stored as it is not needed during matching.
+type haveTag struct {
+ tag language.Tag
+
+ // index of this tag in the original list of supported tags.
+ index int
+
+ // conf is the maximum confidence that can result from matching this haveTag.
+ // When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
+ conf Confidence
+
+ // Maximized region and script.
+ maxRegion language.Region
+ maxScript language.Script
+
+ // altScript may be checked as an alternative match to maxScript. If altScript
+ // matches, the confidence level for this match is Low. Theoretically there
+ // could be multiple alternative scripts. This does not occur in practice.
+ altScript language.Script
+
+ // nextMax is the index of the next haveTag with the same maximized tags.
+ nextMax uint16
+}
+
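+// makeHaveTag canonicalizes and maximizes tag to compute its maximized script
+// and region, returning the resulting haveTag together with the maximized
+// base language under which it should be indexed.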
+func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) {
+ max := tag
+ if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 {
+ max, _ = canonicalize(All, max)
+ max, _ = max.Maximize()
+ max.RemakeString()
+ }
+ return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID
+}
+
+// altScript returns an alternative script that may match the given script with
+// a low confidence. At the moment, the langMatch data allows for at most one
+// script to map to another and we rely on this to keep the code simple.
+func altScript(l language.Language, s language.Script) language.Script {
+ for _, alt := range matchScript {
+ // TODO: also match cases where language is not the same.
+ if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) &&
+ language.Script(alt.haveScript) == s {
+ return language.Script(alt.wantScript)
+ }
+ }
+ return 0
+}
+
+// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
+// Tags that have the same maximized values are linked by index.
+func (h *matchHeader) addIfNew(n haveTag, exact bool) {
+ h.original = h.original || exact
+ // Don't add a tag if an entry with the same non-language subtags already exists.
+ for _, v := range h.haveTags {
+ if equalsRest(v.tag, n.tag) {
+ return
+ }
+ }
+ // Allow duplicate maximized tags, but create a linked list to allow quickly
+ // comparing the equivalents and bailing out early.
+ for i, v := range h.haveTags {
+ if v.maxScript == n.maxScript &&
+ v.maxRegion == n.maxRegion &&
+ v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() {
+ for h.haveTags[i].nextMax != 0 {
+ i = int(h.haveTags[i].nextMax)
+ }
+ h.haveTags[i].nextMax = uint16(len(h.haveTags))
+ break
+ }
+ }
+ h.haveTags = append(h.haveTags, &n)
+}
+
+// header returns the matchHeader for the given language. It creates one if
+// it doesn't already exist.
+func (m *matcher) header(l language.Language) *matchHeader {
+ if h := m.index[l]; h != nil {
+ return h
+ }
+ h := &matchHeader{}
+ m.index[l] = h
+ return h
+}
+
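+// toConf maps a CLDR matching distance to a Confidence: distances up to 10
+// (such as the mutual-intelligibility entries in matchLang) yield High,
+// distances below 30 yield Low, and anything larger yields No.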
+func toConf(d uint8) Confidence {
+ if d <= 10 {
+ return High
+ }
+ if d < 30 {
+ return Low
+ }
+ return No
+}
+
+// newMatcher builds an index for the given supported tags and returns it as
+// a matcher. It also expands the index by considering various equivalence classes
+// for a given tag.
+func newMatcher(supported []Tag, options []MatchOption) *matcher {
+ m := &matcher{
+ index: make(map[language.Language]*matchHeader),
+ preferSameScript: true,
+ }
+ for _, o := range options {
+ o(m)
+ }
+ if len(supported) == 0 {
+ m.default_ = &haveTag{}
+ return m
+ }
+ // Add supported languages to the index. Add exact matches first to give
+ // them precedence.
+ for i, tag := range supported {
+ tt := tag.tag()
+ pair, _ := makeHaveTag(tt, i)
+ m.header(tt.LangID).addIfNew(pair, true)
+ m.supported = append(m.supported, &pair)
+ }
+ m.default_ = m.header(supported[0].lang()).haveTags[0]
+ // Keep these in two different loops to support the case that two equivalent
+ // languages are distinguished, such as iw and he.
+ for i, tag := range supported {
+ tt := tag.tag()
+ pair, max := makeHaveTag(tt, i)
+ if max != tt.LangID {
+ m.header(max).addIfNew(pair, true)
+ }
+ }
+
+ // update is used to add indexes in the map for equivalent languages.
+ // update will only add entries to original indexes, thus not computing any
+ // transitive relations.
+ update := func(want, have uint16, conf Confidence) {
+ if hh := m.index[language.Language(have)]; hh != nil {
+ if !hh.original {
+ return
+ }
+ hw := m.header(language.Language(want))
+ for _, ht := range hh.haveTags {
+ v := *ht
+ if conf < v.conf {
+ v.conf = conf
+ }
+ v.nextMax = 0 // this value needs to be recomputed
+ if v.altScript != 0 {
+ v.altScript = altScript(language.Language(want), v.maxScript)
+ }
+ hw.addIfNew(v, conf == Exact && hh.original)
+ }
+ }
+ }
+
+ // Add entries for languages with mutual intelligibility as defined by CLDR's
+ // languageMatch data.
+ for _, ml := range matchLang {
+ update(ml.want, ml.have, toConf(ml.distance))
+ if !ml.oneway {
+ update(ml.have, ml.want, toConf(ml.distance))
+ }
+ }
+
+ // Add entries for possible canonicalizations. This is an optimization to
+ // ensure that only one map lookup needs to be done at runtime per desired tag.
+ // First we match deprecated equivalents. If they are perfect equivalents
+ // (their canonicalization simply substitutes a different language code, but
+ // nothing else), the match confidence is Exact, otherwise it is High.
+ for i, lm := range language.AliasMap {
+ // If deprecated codes match and there is no fiddling with the script
+ // or region, we consider it an exact match.
+ conf := Exact
+ if language.AliasTypes[i] != language.Macro {
+ if !isExactEquivalent(language.Language(lm.From)) {
+ conf = High
+ }
+ update(lm.To, lm.From, conf)
+ }
+ update(lm.From, lm.To, conf)
+ }
+ return m
+}
+
+// getBest gets the best matching tag in m for any of the given tags, taking into
+// account the order of preference of the given tags.
+func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) {
+ best := bestMatch{}
+ for i, ww := range want {
+ w := ww.tag()
+ var max language.Tag
+ // Check for exact match first.
+ h := m.index[w.LangID]
+ if w.LangID != 0 {
+ if h == nil {
+ continue
+ }
+ // Base language is defined.
+ max, _ = canonicalize(Legacy|Deprecated|Macro, w)
+ // A region that is added through canonicalization is stronger than
+ // a maximized region: set it in the original (e.g. mo -> ro-MD).
+ if w.RegionID != max.RegionID {
+ w.RegionID = max.RegionID
+ }
+ // TODO: should we do the same for scripts?
+ // See test case: en, sr, nl ; sh ; sr
+ max, _ = max.Maximize()
+ } else {
+ // Base language is not defined.
+ if h != nil {
+ for i := range h.haveTags {
+ have := h.haveTags[i]
+ if equalsRest(have.tag, w) {
+ return have, w, Exact
+ }
+ }
+ }
+ if w.ScriptID == 0 && w.RegionID == 0 {
+ // We skip all tags matching und for approximate matching, including
+ // private tags.
+ continue
+ }
+ max, _ = w.Maximize()
+ if h = m.index[max.LangID]; h == nil {
+ continue
+ }
+ }
+ pin := true
+ for _, t := range want[i+1:] {
+ if w.LangID == t.lang() {
+ pin = false
+ break
+ }
+ }
+ // Check for match based on maximized tag.
+ for i := range h.haveTags {
+ have := h.haveTags[i]
+ best.update(have, w, max.ScriptID, max.RegionID, pin)
+ if best.conf == Exact {
+ for have.nextMax != 0 {
+ have = h.haveTags[have.nextMax]
+ best.update(have, w, max.ScriptID, max.RegionID, pin)
+ }
+ return best.have, best.want, best.conf
+ }
+ }
+ }
+ if best.conf <= No {
+ if len(want) != 0 {
+ return nil, want[0].tag(), No
+ }
+ return nil, language.Tag{}, No
+ }
+ return best.have, best.want, best.conf
+}
+
+// bestMatch accumulates the best match so far.
+type bestMatch struct {
+ have *haveTag
+ want language.Tag
+ conf Confidence
+ pinnedRegion language.Region
+ pinLanguage bool
+ sameRegionGroup bool
+ // Cached results from applying tie-breaking rules.
+ origLang bool
+ origReg bool
+ paradigmReg bool
+ regGroupDist uint8
+ origScript bool
+}
+
+// update updates the existing best match if the new pair is considered to be a
+// better match. To determine if the given pair is a better match, it first
+// computes the rough confidence level. If this surpasses the current match, it
+// will replace it and update the tie-breaker rule cache. If there is a tie, it
+// proceeds with applying a series of tie-breaker rules. If there is no
+// conclusive winner after applying the tie-breaker rules, it leaves the current
+// match as the preferred match.
+//
+// If pin is true and have and tag are a strong match, it will henceforth only
+// consider matches for this language. This corresponds to the idea that most
+// users have a strong preference for the first defined language. A user can
+// still prefer a second language over a dialect of the preferred language by
+// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
+// be false.
+func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) {
+ // Bail if the maximum attainable confidence is below that of the current best match.
+ c := have.conf
+ if c < m.conf {
+ return
+ }
+ // Don't change the language once we already have found an exact match.
+ if m.pinLanguage && tag.LangID != m.want.LangID {
+ return
+ }
+ // Pin the region group if we are comparing tags for the same language.
+ if tag.LangID == m.want.LangID && m.sameRegionGroup {
+ _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID)
+ if !sameGroup {
+ return
+ }
+ }
+ if c == Exact && have.maxScript == maxScript {
+ // If there is another language and then another entry of this language,
+ // don't pin anything, otherwise pin the language.
+ m.pinLanguage = pin
+ }
+ if equalsRest(have.tag, tag) {
+ } else if have.maxScript != maxScript {
+ // There is usually very little comprehension between different scripts.
+ // In a few cases there may still be Low comprehension. This possibility
+ // is pre-computed and stored in have.altScript.
+ if Low < m.conf || have.altScript != maxScript {
+ return
+ }
+ c = Low
+ } else if have.maxRegion != maxRegion {
+ if High < c {
+ // There is usually a small difference between languages across regions.
+ c = High
+ }
+ }
+
+ // We store the results of the computations of the tie-breaker rules along
+ // with the best match. There is no need to do the checks once we determine
+ // we have a winner, but we do still need to do the tie-breaker computations.
+ // We use "beaten" to keep track if we still need to do the checks.
+ beaten := false // true if the new pair defeats the current one.
+ if c != m.conf {
+ if c < m.conf {
+ return
+ }
+ beaten = true
+ }
+
+ // Tie-breaker rules:
+ // We prefer if the pre-maximized language was specified and identical.
+ origLang := have.tag.LangID == tag.LangID && tag.LangID != 0
+ if !beaten && m.origLang != origLang {
+ if m.origLang {
+ return
+ }
+ beaten = true
+ }
+
+ // We prefer if the pre-maximized region was specified and identical.
+ origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0
+ if !beaten && m.origReg != origReg {
+ if m.origReg {
+ return
+ }
+ beaten = true
+ }
+
+ regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID)
+ if !beaten && m.regGroupDist != regGroupDist {
+ if regGroupDist > m.regGroupDist {
+ return
+ }
+ beaten = true
+ }
+
+ paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion)
+ if !beaten && m.paradigmReg != paradigmReg {
+ if !paradigmReg {
+ return
+ }
+ beaten = true
+ }
+
+ // Next we prefer if the pre-maximized script was specified and identical.
+ origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0
+ if !beaten && m.origScript != origScript {
+ if m.origScript {
+ return
+ }
+ beaten = true
+ }
+
+ // Update m to the newly found best match.
+ if beaten {
+ m.have = have
+ m.want = tag
+ m.conf = c
+ m.pinnedRegion = maxRegion
+ m.sameRegionGroup = sameGroup
+ m.origLang = origLang
+ m.origReg = origReg
+ m.paradigmReg = paradigmReg
+ m.origScript = origScript
+ m.regGroupDist = regGroupDist
+ }
+}
+
+func isParadigmLocale(lang language.Language, r language.Region) bool {
+ for _, e := range paradigmLocales {
+ if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) {
+ return true
+ }
+ }
+ return false
+}
+
+// regionGroupDist computes the distance between two regions based on their
+// CLDR grouping.
+func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) {
+ const defaultDistance = 4
+
+ aGroup := uint(regionToGroups[a]) << 1
+ bGroup := uint(regionToGroups[b]) << 1
+ for _, ri := range matchRegion {
+ if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) {
+ group := uint(1 << (ri.group &^ 0x80))
+ if 0x80&ri.group == 0 {
+ if aGroup&bGroup&group != 0 { // Both regions are in the group.
+ return ri.distance, ri.distance == defaultDistance
+ }
+ } else {
+ if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
+ return ri.distance, ri.distance == defaultDistance
+ }
+ }
+ }
+ }
+ return defaultDistance, true
+}
+
+// equalsRest compares everything except the language.
+func equalsRest(a, b language.Tag) bool {
+ // TODO: don't include extensions in this comparison. To do this efficiently,
+ // though, we should handle private tags separately.
+ return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags()
+}
+
+// isExactEquivalent returns true if canonicalizing the language will not alter
+// the script or region of a tag.
+func isExactEquivalent(l language.Language) bool {
+ for _, o := range notEquivalent {
+ if o == l {
+ return false
+ }
+ }
+ return true
+}
+
+var notEquivalent []language.Language
+
+func init() {
+ // Create a list of all languages for which canonicalization may alter the
+ // script or region.
+ for _, lm := range language.AliasMap {
+ tag := language.Tag{LangID: language.Language(lm.From)}
+ if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 {
+ notEquivalent = append(notEquivalent, language.Language(lm.From))
+ }
+ }
+ // Maximize undefined regions of paradigm locales.
+ for i, v := range paradigmLocales {
+ t := language.Tag{LangID: language.Language(v[0])}
+ max, _ := t.Maximize()
+ if v[1] == 0 {
+ paradigmLocales[i][1] = uint16(max.RegionID)
+ }
+ if v[2] == 0 {
+ paradigmLocales[i][2] = uint16(max.RegionID)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go
new file mode 100644
index 000000000..053336e28
--- /dev/null
+++ b/vendor/golang.org/x/text/language/parse.go
@@ -0,0 +1,256 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+ "errors"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/text/internal/language"
+)
+
+// ValueError is returned by any of the parsing functions when the
+// input is well-formed but the respective subtag is not recognized
+// as a valid value.
+type ValueError interface {
+ error
+
+ // Subtag returns the subtag for which the error occurred.
+ Subtag() string
+}
+
+// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
+// failed it returns an error and any part of the tag that could be parsed.
+// If parsing succeeded but an unknown value was found, it returns
+// ValueError. The Tag returned in this case is just stripped of the unknown
+// value. All other values are preserved. It accepts tags in the BCP 47 format
+// and extensions to this standard defined in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// The resulting tag is canonicalized using the default canonicalization type.
+func Parse(s string) (t Tag, err error) {
+ return Default.Parse(s)
+}
+
+// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
+// failed it returns an error and any part of the tag that could be parsed.
+// If parsing succeeded but an unknown value was found, it returns
+// ValueError. The Tag returned in this case is just stripped of the unknown
+// value. All other values are preserved. It accepts tags in the BCP 47 format
+// and extensions to this standard defined in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// The resulting tag is canonicalized using the canonicalization type c.
+func (c CanonType) Parse(s string) (t Tag, err error) {
+ defer func() {
+ if recover() != nil {
+ t = Tag{}
+ err = language.ErrSyntax
+ }
+ }()
+
+ tt, err := language.Parse(s)
+ if err != nil {
+ return makeTag(tt), err
+ }
+ tt, changed := canonicalize(c, tt)
+ if changed {
+ tt.RemakeString()
+ }
+ return makeTag(tt), nil
+}
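+
+// A minimal, hedged usage sketch of Parse (the input tag is illustrative):
+//
+// t, err := language.Parse("en-US")
+// if err != nil {
+// // handle syntax errors or unknown subtags (ValueError)
+// }
+// _ = t // canonicalized Tag for en-US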
+
+// Compose creates a Tag from individual parts, which may be of type Tag, Base,
+// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
+// Base, Script or Region or slice of type Variant or Extension is passed more
+// than once, the latter will overwrite the former. Variants and Extensions are
+// accumulated, but if two extensions of the same type are passed, the latter
+// will replace the former. For -u extensions, though, the key-type pairs are
+// added, where later values overwrite older ones. A Tag overwrites all former
+// values and typically only makes sense as the first argument. The resulting
+// tag is returned after canonicalizing using the Default CanonType. If one or
+// more errors are encountered, one of the errors is returned.
+func Compose(part ...interface{}) (t Tag, err error) {
+ return Default.Compose(part...)
+}
+
+// Compose creates a Tag from individual parts, which may be of type Tag, Base,
+// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
+// Base, Script or Region or slice of type Variant or Extension is passed more
+// than once, the latter will overwrite the former. Variants and Extensions are
+// accumulated, but if two extensions of the same type are passed, the latter
+// will replace the former. For -u extensions, though, the key-type pairs are
+// added, where later values overwrite older ones. A Tag overwrites all former
+// values and typically only makes sense as the first argument. The resulting
+// tag is returned after canonicalizing using CanonType c. If one or more errors
+// are encountered, one of the errors is returned.
+func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
+ defer func() {
+ if recover() != nil {
+ t = Tag{}
+ err = language.ErrSyntax
+ }
+ }()
+
+ var b language.Builder
+ if err = update(&b, part...); err != nil {
+ return und, err
+ }
+ b.Tag, _ = canonicalize(c, b.Tag)
+ return makeTag(b.Make()), err
+}
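+
+// A hedged sketch of composing a tag from parts (the values are illustrative):
+//
+// t, err := language.Compose(
+// language.MustParseBase("de"),
+// language.MustParseRegion("CH"),
+// )
+// // On success, t is the canonicalized tag de-CH.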
+
+var errInvalidArgument = errors.New("invalid Extension or Variant")
+
+func update(b *language.Builder, part ...interface{}) (err error) {
+ for _, x := range part {
+ switch v := x.(type) {
+ case Tag:
+ b.SetTag(v.tag())
+ case Base:
+ b.Tag.LangID = v.langID
+ case Script:
+ b.Tag.ScriptID = v.scriptID
+ case Region:
+ b.Tag.RegionID = v.regionID
+ case Variant:
+ if v.variant == "" {
+ err = errInvalidArgument
+ break
+ }
+ b.AddVariant(v.variant)
+ case Extension:
+ if v.s == "" {
+ err = errInvalidArgument
+ break
+ }
+ b.SetExt(v.s)
+ case []Variant:
+ b.ClearVariants()
+ for _, v := range v {
+ b.AddVariant(v.variant)
+ }
+ case []Extension:
+ b.ClearExtensions()
+ for _, e := range v {
+ b.SetExt(e.s)
+ }
+ // TODO: support parsing of raw strings based on morphology or just extensions?
+ case error:
+ if v != nil {
+ err = v
+ }
+ }
+ }
+ return
+}
+
+var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
+var errTagListTooLarge = errors.New("tag list exceeds max length")
+
+// ParseAcceptLanguage parses the contents of an Accept-Language header as
+// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
+// a list of corresponding quality weights. It is more permissive than RFC 2616
+// and may return non-nil slices even if the input is not valid.
+// The Tags will be sorted by highest weight first and then by first occurrence.
+// Tags with a weight of zero will be dropped. An error will be returned if the
+// input could not be parsed.
+func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
+ defer func() {
+ if recover() != nil {
+ tag = nil
+ q = nil
+ err = language.ErrSyntax
+ }
+ }()
+
+ if strings.Count(s, "-") > 1000 {
+ return nil, nil, errTagListTooLarge
+ }
+
+ var entry string
+ for s != "" {
+ if entry, s = split(s, ','); entry == "" {
+ continue
+ }
+
+ entry, weight := split(entry, ';')
+
+ // Scan the language.
+ t, err := Parse(entry)
+ if err != nil {
+ id, ok := acceptFallback[entry]
+ if !ok {
+ return nil, nil, err
+ }
+ t = makeTag(language.Tag{LangID: id})
+ }
+
+ // Scan the optional weight.
+ w := 1.0
+ if weight != "" {
+ weight = consume(weight, 'q')
+ weight = consume(weight, '=')
+ // consume returns the empty string when a token could not be
+ // consumed, resulting in an error for ParseFloat.
+ if w, err = strconv.ParseFloat(weight, 32); err != nil {
+ return nil, nil, errInvalidWeight
+ }
+ // Drop tags with a quality weight of 0.
+ if w <= 0 {
+ continue
+ }
+ }
+
+ tag = append(tag, t)
+ q = append(q, float32(w))
+ }
+ sort.Stable(&tagSort{tag, q})
+ return tag, q, nil
+}
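+
+// A hedged usage sketch of ParseAcceptLanguage (the header value is
+// illustrative):
+//
+// tags, q, err := language.ParseAcceptLanguage("fr-CH, fr;q=0.9, en;q=0.8")
+// // On success, tags is sorted by weight: fr-CH, fr, en with
+// // q = 1.0, 0.9, 0.8.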
+
+// consume removes a leading token c from s and returns the result or the empty
+// string if there is no such token.
+func consume(s string, c byte) string {
+ if s == "" || s[0] != c {
+ return ""
+ }
+ return strings.TrimSpace(s[1:])
+}
+
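+// split splits s at the first occurrence of c, trimming surrounding space;
+// tail is empty if c does not occur in s.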
+func split(s string, c byte) (head, tail string) {
+ if i := strings.IndexByte(s, c); i >= 0 {
+ return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
+ }
+ return strings.TrimSpace(s), ""
+}
+
+// acceptFallback is a hack mapping that handles a small number of literal
+// values that occur in Accept-Language headers with reasonable frequency.
+var acceptFallback = map[string]language.Language{
+ "english": _en,
+ "deutsch": _de,
+ "italian": _it,
+ "french": _fr,
+ "*": _mul, // defined in the spec to match all languages.
+}
+
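+// tagSort sorts tags by descending quality weight; used with sort.Stable so
+// that tags with equal weight keep their original order.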
+type tagSort struct {
+ tag []Tag
+ q []float32
+}
+
+func (s *tagSort) Len() int {
+ return len(s.q)
+}
+
+func (s *tagSort) Less(i, j int) bool {
+ return s.q[i] > s.q[j]
+}
+
+func (s *tagSort) Swap(i, j int) {
+ s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
+ s.q[i], s.q[j] = s.q[j], s.q[i]
+}
diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go
new file mode 100644
index 000000000..a6573dcb2
--- /dev/null
+++ b/vendor/golang.org/x/text/language/tables.go
@@ -0,0 +1,298 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package language
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+const (
+ _de = 269
+ _en = 313
+ _fr = 350
+ _it = 505
+ _mo = 784
+ _no = 879
+ _nb = 839
+ _pt = 960
+ _sh = 1031
+ _mul = 806
+ _und = 0
+)
+const (
+ _001 = 1
+ _419 = 31
+ _BR = 65
+ _CA = 73
+ _ES = 111
+ _GB = 124
+ _MD = 189
+ _PT = 239
+ _UK = 307
+ _US = 310
+ _ZZ = 358
+ _XA = 324
+ _XC = 326
+ _XK = 334
+)
+const (
+ _Latn = 91
+ _Hani = 57
+ _Hans = 59
+ _Hant = 60
+ _Qaaa = 149
+ _Qaai = 157
+ _Qabx = 198
+ _Zinh = 255
+ _Zyyy = 260
+ _Zzzz = 261
+)
+
+var regionToGroups = []uint8{ // 359 elements
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00,
+ 0x00, 0x04, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x04,
+ // Entry 40 - 7F
+ 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00,
+ 0x08, 0x00, 0x04, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04,
+ // Entry 80 - BF
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x04, 0x01, 0x00, 0x04, 0x02, 0x00,
+ 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x04,
+ // Entry C0 - FF
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x01, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 100 - 13F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x04,
+ 0x00, 0x00, 0x04, 0x00, 0x04, 0x04, 0x05, 0x00,
+ // Entry 140 - 17F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+} // Size: 383 bytes
+
+var paradigmLocales = [][3]uint16{ // 3 elements
+ 0: [3]uint16{0x139, 0x0, 0x7c},
+ 1: [3]uint16{0x13e, 0x0, 0x1f},
+ 2: [3]uint16{0x3c0, 0x41, 0xef},
+} // Size: 42 bytes
+
+type mutualIntelligibility struct {
+ want uint16
+ have uint16
+ distance uint8
+ oneway bool
+}
+type scriptIntelligibility struct {
+ wantLang uint16
+ haveLang uint16
+ wantScript uint8
+ haveScript uint8
+ distance uint8
+}
+type regionIntelligibility struct {
+ lang uint16
+ script uint8
+ group uint8
+ distance uint8
+}
+
+// matchLang holds pairs of langIDs of base languages that are typically
+// mutually intelligible. Each pair is associated with a confidence and
+// whether the intelligibility goes one or both ways.
+var matchLang = []mutualIntelligibility{ // 113 elements
+ 0: {want: 0x1d1, have: 0xb7, distance: 0x4, oneway: false},
+ 1: {want: 0x407, have: 0xb7, distance: 0x4, oneway: false},
+ 2: {want: 0x407, have: 0x1d1, distance: 0x4, oneway: false},
+ 3: {want: 0x407, have: 0x432, distance: 0x4, oneway: false},
+ 4: {want: 0x43a, have: 0x1, distance: 0x4, oneway: false},
+ 5: {want: 0x1a3, have: 0x10d, distance: 0x4, oneway: true},
+ 6: {want: 0x295, have: 0x10d, distance: 0x4, oneway: true},
+ 7: {want: 0x101, have: 0x36f, distance: 0x8, oneway: false},
+ 8: {want: 0x101, have: 0x347, distance: 0x8, oneway: false},
+ 9: {want: 0x5, have: 0x3e2, distance: 0xa, oneway: true},
+ 10: {want: 0xd, have: 0x139, distance: 0xa, oneway: true},
+ 11: {want: 0x16, have: 0x367, distance: 0xa, oneway: true},
+ 12: {want: 0x21, have: 0x139, distance: 0xa, oneway: true},
+ 13: {want: 0x56, have: 0x13e, distance: 0xa, oneway: true},
+ 14: {want: 0x58, have: 0x3e2, distance: 0xa, oneway: true},
+ 15: {want: 0x71, have: 0x3e2, distance: 0xa, oneway: true},
+ 16: {want: 0x75, have: 0x139, distance: 0xa, oneway: true},
+ 17: {want: 0x82, have: 0x1be, distance: 0xa, oneway: true},
+ 18: {want: 0xa5, have: 0x139, distance: 0xa, oneway: true},
+ 19: {want: 0xb2, have: 0x15e, distance: 0xa, oneway: true},
+ 20: {want: 0xdd, have: 0x153, distance: 0xa, oneway: true},
+ 21: {want: 0xe5, have: 0x139, distance: 0xa, oneway: true},
+ 22: {want: 0xe9, have: 0x3a, distance: 0xa, oneway: true},
+ 23: {want: 0xf0, have: 0x15e, distance: 0xa, oneway: true},
+ 24: {want: 0xf9, have: 0x15e, distance: 0xa, oneway: true},
+ 25: {want: 0x100, have: 0x139, distance: 0xa, oneway: true},
+ 26: {want: 0x130, have: 0x139, distance: 0xa, oneway: true},
+ 27: {want: 0x13c, have: 0x139, distance: 0xa, oneway: true},
+ 28: {want: 0x140, have: 0x151, distance: 0xa, oneway: true},
+ 29: {want: 0x145, have: 0x13e, distance: 0xa, oneway: true},
+ 30: {want: 0x158, have: 0x101, distance: 0xa, oneway: true},
+ 31: {want: 0x16d, have: 0x367, distance: 0xa, oneway: true},
+ 32: {want: 0x16e, have: 0x139, distance: 0xa, oneway: true},
+ 33: {want: 0x16f, have: 0x139, distance: 0xa, oneway: true},
+ 34: {want: 0x17e, have: 0x139, distance: 0xa, oneway: true},
+ 35: {want: 0x190, have: 0x13e, distance: 0xa, oneway: true},
+ 36: {want: 0x194, have: 0x13e, distance: 0xa, oneway: true},
+ 37: {want: 0x1a4, have: 0x1be, distance: 0xa, oneway: true},
+ 38: {want: 0x1b4, have: 0x139, distance: 0xa, oneway: true},
+ 39: {want: 0x1b8, have: 0x139, distance: 0xa, oneway: true},
+ 40: {want: 0x1d4, have: 0x15e, distance: 0xa, oneway: true},
+ 41: {want: 0x1d7, have: 0x3e2, distance: 0xa, oneway: true},
+ 42: {want: 0x1d9, have: 0x139, distance: 0xa, oneway: true},
+ 43: {want: 0x1e7, have: 0x139, distance: 0xa, oneway: true},
+ 44: {want: 0x1f8, have: 0x139, distance: 0xa, oneway: true},
+ 45: {want: 0x20e, have: 0x1e1, distance: 0xa, oneway: true},
+ 46: {want: 0x210, have: 0x139, distance: 0xa, oneway: true},
+ 47: {want: 0x22d, have: 0x15e, distance: 0xa, oneway: true},
+ 48: {want: 0x242, have: 0x3e2, distance: 0xa, oneway: true},
+ 49: {want: 0x24a, have: 0x139, distance: 0xa, oneway: true},
+ 50: {want: 0x251, have: 0x139, distance: 0xa, oneway: true},
+ 51: {want: 0x265, have: 0x139, distance: 0xa, oneway: true},
+ 52: {want: 0x274, have: 0x48a, distance: 0xa, oneway: true},
+ 53: {want: 0x28a, have: 0x3e2, distance: 0xa, oneway: true},
+ 54: {want: 0x28e, have: 0x1f9, distance: 0xa, oneway: true},
+ 55: {want: 0x2a3, have: 0x139, distance: 0xa, oneway: true},
+ 56: {want: 0x2b5, have: 0x15e, distance: 0xa, oneway: true},
+ 57: {want: 0x2b8, have: 0x139, distance: 0xa, oneway: true},
+ 58: {want: 0x2be, have: 0x139, distance: 0xa, oneway: true},
+ 59: {want: 0x2c3, have: 0x15e, distance: 0xa, oneway: true},
+ 60: {want: 0x2ed, have: 0x139, distance: 0xa, oneway: true},
+ 61: {want: 0x2f1, have: 0x15e, distance: 0xa, oneway: true},
+ 62: {want: 0x2fa, have: 0x139, distance: 0xa, oneway: true},
+ 63: {want: 0x2ff, have: 0x7e, distance: 0xa, oneway: true},
+ 64: {want: 0x304, have: 0x139, distance: 0xa, oneway: true},
+ 65: {want: 0x30b, have: 0x3e2, distance: 0xa, oneway: true},
+ 66: {want: 0x31b, have: 0x1be, distance: 0xa, oneway: true},
+ 67: {want: 0x31f, have: 0x1e1, distance: 0xa, oneway: true},
+ 68: {want: 0x320, have: 0x139, distance: 0xa, oneway: true},
+ 69: {want: 0x331, have: 0x139, distance: 0xa, oneway: true},
+ 70: {want: 0x351, have: 0x139, distance: 0xa, oneway: true},
+ 71: {want: 0x36a, have: 0x347, distance: 0xa, oneway: false},
+ 72: {want: 0x36a, have: 0x36f, distance: 0xa, oneway: true},
+ 73: {want: 0x37a, have: 0x139, distance: 0xa, oneway: true},
+ 74: {want: 0x387, have: 0x139, distance: 0xa, oneway: true},
+ 75: {want: 0x389, have: 0x139, distance: 0xa, oneway: true},
+ 76: {want: 0x38b, have: 0x15e, distance: 0xa, oneway: true},
+ 77: {want: 0x390, have: 0x139, distance: 0xa, oneway: true},
+ 78: {want: 0x395, have: 0x139, distance: 0xa, oneway: true},
+ 79: {want: 0x39d, have: 0x139, distance: 0xa, oneway: true},
+ 80: {want: 0x3a5, have: 0x139, distance: 0xa, oneway: true},
+ 81: {want: 0x3be, have: 0x139, distance: 0xa, oneway: true},
+ 82: {want: 0x3c4, have: 0x13e, distance: 0xa, oneway: true},
+ 83: {want: 0x3d4, have: 0x10d, distance: 0xa, oneway: true},
+ 84: {want: 0x3d9, have: 0x139, distance: 0xa, oneway: true},
+ 85: {want: 0x3e5, have: 0x15e, distance: 0xa, oneway: true},
+ 86: {want: 0x3e9, have: 0x1be, distance: 0xa, oneway: true},
+ 87: {want: 0x3fa, have: 0x139, distance: 0xa, oneway: true},
+ 88: {want: 0x40c, have: 0x139, distance: 0xa, oneway: true},
+ 89: {want: 0x423, have: 0x139, distance: 0xa, oneway: true},
+ 90: {want: 0x429, have: 0x139, distance: 0xa, oneway: true},
+ 91: {want: 0x431, have: 0x139, distance: 0xa, oneway: true},
+ 92: {want: 0x43b, have: 0x139, distance: 0xa, oneway: true},
+ 93: {want: 0x43e, have: 0x1e1, distance: 0xa, oneway: true},
+ 94: {want: 0x445, have: 0x139, distance: 0xa, oneway: true},
+ 95: {want: 0x450, have: 0x139, distance: 0xa, oneway: true},
+ 96: {want: 0x461, have: 0x139, distance: 0xa, oneway: true},
+ 97: {want: 0x467, have: 0x3e2, distance: 0xa, oneway: true},
+ 98: {want: 0x46f, have: 0x139, distance: 0xa, oneway: true},
+ 99: {want: 0x476, have: 0x3e2, distance: 0xa, oneway: true},
+ 100: {want: 0x3883, have: 0x139, distance: 0xa, oneway: true},
+ 101: {want: 0x480, have: 0x139, distance: 0xa, oneway: true},
+ 102: {want: 0x482, have: 0x139, distance: 0xa, oneway: true},
+ 103: {want: 0x494, have: 0x3e2, distance: 0xa, oneway: true},
+ 104: {want: 0x49d, have: 0x139, distance: 0xa, oneway: true},
+ 105: {want: 0x4ac, have: 0x529, distance: 0xa, oneway: true},
+ 106: {want: 0x4b4, have: 0x139, distance: 0xa, oneway: true},
+ 107: {want: 0x4bc, have: 0x3e2, distance: 0xa, oneway: true},
+ 108: {want: 0x4e5, have: 0x15e, distance: 0xa, oneway: true},
+ 109: {want: 0x4f2, have: 0x139, distance: 0xa, oneway: true},
+ 110: {want: 0x512, have: 0x139, distance: 0xa, oneway: true},
+ 111: {want: 0x518, have: 0x139, distance: 0xa, oneway: true},
+ 112: {want: 0x52f, have: 0x139, distance: 0xa, oneway: true},
+} // Size: 702 bytes
+
+// matchScript holds pairs of scriptIDs where readers of one script
+// can typically also read the other. Each is associated with a confidence.
+var matchScript = []scriptIntelligibility{ // 26 elements
+ 0: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x5b, haveScript: 0x20, distance: 0x5},
+ 1: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x20, haveScript: 0x5b, distance: 0x5},
+ 2: {wantLang: 0x58, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa},
+ 3: {wantLang: 0xa5, haveLang: 0x139, wantScript: 0xe, haveScript: 0x5b, distance: 0xa},
+ 4: {wantLang: 0x1d7, haveLang: 0x3e2, wantScript: 0x8, haveScript: 0x20, distance: 0xa},
+ 5: {wantLang: 0x210, haveLang: 0x139, wantScript: 0x2e, haveScript: 0x5b, distance: 0xa},
+ 6: {wantLang: 0x24a, haveLang: 0x139, wantScript: 0x4f, haveScript: 0x5b, distance: 0xa},
+ 7: {wantLang: 0x251, haveLang: 0x139, wantScript: 0x53, haveScript: 0x5b, distance: 0xa},
+ 8: {wantLang: 0x2b8, haveLang: 0x139, wantScript: 0x58, haveScript: 0x5b, distance: 0xa},
+ 9: {wantLang: 0x304, haveLang: 0x139, wantScript: 0x6f, haveScript: 0x5b, distance: 0xa},
+ 10: {wantLang: 0x331, haveLang: 0x139, wantScript: 0x76, haveScript: 0x5b, distance: 0xa},
+ 11: {wantLang: 0x351, haveLang: 0x139, wantScript: 0x22, haveScript: 0x5b, distance: 0xa},
+ 12: {wantLang: 0x395, haveLang: 0x139, wantScript: 0x83, haveScript: 0x5b, distance: 0xa},
+ 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5b, distance: 0xa},
+ 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa},
+ 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa},
+ 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd6, haveScript: 0x5b, distance: 0xa},
+ 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5b, distance: 0xa},
+ 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe9, haveScript: 0x5b, distance: 0xa},
+ 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5b, distance: 0xa},
+ 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa},
+ 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa},
+ 22: {wantLang: 0x4bc, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa},
+ 23: {wantLang: 0x512, haveLang: 0x139, wantScript: 0x3e, haveScript: 0x5b, distance: 0xa},
+ 24: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3b, haveScript: 0x3c, distance: 0xf},
+ 25: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3c, haveScript: 0x3b, distance: 0x13},
+} // Size: 232 bytes
+
+var matchRegion = []regionIntelligibility{ // 15 elements
+ 0: {lang: 0x3a, script: 0x0, group: 0x4, distance: 0x4},
+ 1: {lang: 0x3a, script: 0x0, group: 0x84, distance: 0x4},
+ 2: {lang: 0x139, script: 0x0, group: 0x1, distance: 0x4},
+ 3: {lang: 0x139, script: 0x0, group: 0x81, distance: 0x4},
+ 4: {lang: 0x13e, script: 0x0, group: 0x3, distance: 0x4},
+ 5: {lang: 0x13e, script: 0x0, group: 0x83, distance: 0x4},
+ 6: {lang: 0x3c0, script: 0x0, group: 0x3, distance: 0x4},
+ 7: {lang: 0x3c0, script: 0x0, group: 0x83, distance: 0x4},
+ 8: {lang: 0x529, script: 0x3c, group: 0x2, distance: 0x4},
+ 9: {lang: 0x529, script: 0x3c, group: 0x82, distance: 0x4},
+ 10: {lang: 0x3a, script: 0x0, group: 0x80, distance: 0x5},
+ 11: {lang: 0x139, script: 0x0, group: 0x80, distance: 0x5},
+ 12: {lang: 0x13e, script: 0x0, group: 0x80, distance: 0x5},
+ 13: {lang: 0x3c0, script: 0x0, group: 0x80, distance: 0x5},
+ 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5},
+} // Size: 114 bytes
+
+// Total table size 1473 bytes (1KiB); checksum: 7BB90B5C
diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go
new file mode 100644
index 000000000..42ea79266
--- /dev/null
+++ b/vendor/golang.org/x/text/language/tags.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import "golang.org/x/text/internal/language/compact"
+
+// TODO: Various sets of commonly used tags and regions.
+
+// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
+// It simplifies safe initialization of Tag values.
+func MustParse(s string) Tag {
+ t, err := Parse(s)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
+// It simplifies safe initialization of Tag values.
+func (c CanonType) MustParse(s string) Tag {
+ t, err := c.Parse(s)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
+// It simplifies safe initialization of Base values.
+func MustParseBase(s string) Base {
+ b, err := ParseBase(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+// MustParseScript is like ParseScript, but panics if the given script cannot be
+// parsed. It simplifies safe initialization of Script values.
+func MustParseScript(s string) Script {
+ scr, err := ParseScript(s)
+ if err != nil {
+ panic(err)
+ }
+ return scr
+}
+
+// MustParseRegion is like ParseRegion, but panics if the given region cannot be
+// parsed. It simplifies safe initialization of Region values.
+func MustParseRegion(s string) Region {
+ r, err := ParseRegion(s)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
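+
+// A small, hedged sketch of the Must* helpers for package-level
+// initialization (the subtag values are illustrative):
+//
+// var (
+// base = language.MustParseBase("en")
+// script = language.MustParseScript("Latn")
+// region = language.MustParseRegion("US")
+// )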
+
+var (
+ und = Tag{}
+
+ Und Tag = Tag{}
+
+ Afrikaans Tag = Tag(compact.Afrikaans)
+ Amharic Tag = Tag(compact.Amharic)
+ Arabic Tag = Tag(compact.Arabic)
+ ModernStandardArabic Tag = Tag(compact.ModernStandardArabic)
+ Azerbaijani Tag = Tag(compact.Azerbaijani)
+ Bulgarian Tag = Tag(compact.Bulgarian)
+ Bengali Tag = Tag(compact.Bengali)
+ Catalan Tag = Tag(compact.Catalan)
+ Czech Tag = Tag(compact.Czech)
+ Danish Tag = Tag(compact.Danish)
+ German Tag = Tag(compact.German)
+ Greek Tag = Tag(compact.Greek)
+ English Tag = Tag(compact.English)
+ AmericanEnglish Tag = Tag(compact.AmericanEnglish)
+ BritishEnglish Tag = Tag(compact.BritishEnglish)
+ Spanish Tag = Tag(compact.Spanish)
+ EuropeanSpanish Tag = Tag(compact.EuropeanSpanish)
+ LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish)
+ Estonian Tag = Tag(compact.Estonian)
+ Persian Tag = Tag(compact.Persian)
+ Finnish Tag = Tag(compact.Finnish)
+ Filipino Tag = Tag(compact.Filipino)
+ French Tag = Tag(compact.French)
+ CanadianFrench Tag = Tag(compact.CanadianFrench)
+ Gujarati Tag = Tag(compact.Gujarati)
+ Hebrew Tag = Tag(compact.Hebrew)
+ Hindi Tag = Tag(compact.Hindi)
+ Croatian Tag = Tag(compact.Croatian)
+ Hungarian Tag = Tag(compact.Hungarian)
+ Armenian Tag = Tag(compact.Armenian)
+ Indonesian Tag = Tag(compact.Indonesian)
+ Icelandic Tag = Tag(compact.Icelandic)
+ Italian Tag = Tag(compact.Italian)
+ Japanese Tag = Tag(compact.Japanese)
+ Georgian Tag = Tag(compact.Georgian)
+ Kazakh Tag = Tag(compact.Kazakh)
+ Khmer Tag = Tag(compact.Khmer)
+ Kannada Tag = Tag(compact.Kannada)
+ Korean Tag = Tag(compact.Korean)
+ Kirghiz Tag = Tag(compact.Kirghiz)
+ Lao Tag = Tag(compact.Lao)
+ Lithuanian Tag = Tag(compact.Lithuanian)
+ Latvian Tag = Tag(compact.Latvian)
+ Macedonian Tag = Tag(compact.Macedonian)
+ Malayalam Tag = Tag(compact.Malayalam)
+ Mongolian Tag = Tag(compact.Mongolian)
+ Marathi Tag = Tag(compact.Marathi)
+ Malay Tag = Tag(compact.Malay)
+ Burmese Tag = Tag(compact.Burmese)
+ Nepali Tag = Tag(compact.Nepali)
+ Dutch Tag = Tag(compact.Dutch)
+ Norwegian Tag = Tag(compact.Norwegian)
+ Punjabi Tag = Tag(compact.Punjabi)
+ Polish Tag = Tag(compact.Polish)
+ Portuguese Tag = Tag(compact.Portuguese)
+ BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese)
+ EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese)
+ Romanian Tag = Tag(compact.Romanian)
+ Russian Tag = Tag(compact.Russian)
+ Sinhala Tag = Tag(compact.Sinhala)
+ Slovak Tag = Tag(compact.Slovak)
+ Slovenian Tag = Tag(compact.Slovenian)
+ Albanian Tag = Tag(compact.Albanian)
+ Serbian Tag = Tag(compact.Serbian)
+ SerbianLatin Tag = Tag(compact.SerbianLatin)
+ Swedish Tag = Tag(compact.Swedish)
+ Swahili Tag = Tag(compact.Swahili)
+ Tamil Tag = Tag(compact.Tamil)
+ Telugu Tag = Tag(compact.Telugu)
+ Thai Tag = Tag(compact.Thai)
+ Turkish Tag = Tag(compact.Turkish)
+ Ukrainian Tag = Tag(compact.Ukrainian)
+ Urdu Tag = Tag(compact.Urdu)
+ Uzbek Tag = Tag(compact.Uzbek)
+ Vietnamese Tag = Tag(compact.Vietnamese)
+ Chinese Tag = Tag(compact.Chinese)
+ SimplifiedChinese Tag = Tag(compact.SimplifiedChinese)
+ TraditionalChinese Tag = Tag(compact.TraditionalChinese)
+ Zulu Tag = Tag(compact.Zulu)
+)
diff --git a/vendor/golang.org/x/text/message/catalog.go b/vendor/golang.org/x/text/message/catalog.go
new file mode 100644
index 000000000..068271def
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+// TODO: some types in this file will need to be made public at some time.
+// Documentation and method names will reflect this by using the exported name.
+
+import (
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// MatchLanguage reports the matched tag obtained from language.MatchStrings for
+// the Matcher of the DefaultCatalog.
+func MatchLanguage(preferred ...string) language.Tag {
+ c := DefaultCatalog
+ tag, _ := language.MatchStrings(c.Matcher(), preferred...)
+ return tag
+}
+
+// DefaultCatalog is used by SetString.
+var DefaultCatalog catalog.Catalog = defaultCatalog
+
+var defaultCatalog = catalog.NewBuilder()
+
+// SetString calls SetString on the initial default Catalog.
+func SetString(tag language.Tag, key string, msg string) error {
+ return defaultCatalog.SetString(tag, key, msg)
+}
+
+// Set calls Set on the initial default Catalog.
+func Set(tag language.Tag, key string, msg ...catalog.Message) error {
+ return defaultCatalog.Set(tag, key, msg...)
+}
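+
+// A hedged sketch of how these helpers combine with message.NewPrinter
+// (the key and translation are illustrative):
+//
+// message.SetString(language.Dutch, "Hello!", "Hallo!")
+// p := message.NewPrinter(MatchLanguage("nl"))
+// p.Printf("Hello!") // prints Hallo!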
diff --git a/vendor/golang.org/x/text/message/catalog/catalog.go b/vendor/golang.org/x/text/message/catalog/catalog.go
new file mode 100644
index 000000000..96955d075
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/catalog.go
@@ -0,0 +1,365 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package catalog defines collections of translated format strings.
+//
+// This package mostly defines types for populating catalogs with messages. The
+// catmsg package contains further definitions for creating custom message and
+// dictionary types as well as packages that use Catalogs.
+//
+// Package catalog defines various interfaces: Dictionary, Loader, and Message.
+// A Dictionary maintains a set of translations of format strings for a single
+// language. The Loader interface defines a source of dictionaries. A
+// translation of a format string is represented by a Message.
+//
+// # Catalogs
+//
+// A Catalog defines a programmatic interface for setting message translations.
+// It maintains a set of per-language dictionaries with translations for a set
+// of keys. For message translation to function properly, a translation should
+// be defined for each key for each supported language. A dictionary may be
+// underspecified, though, if there is a parent language that already defines
+// the key. For example, a Dictionary for "en-GB" could leave out entries that
+// are identical to those in a dictionary for "en".
+//
+// # Messages
+//
+// A Message is a format string which varies on the value of substitution
+// variables. For instance, to indicate the number of results one could want "no
+// results" if there are none, "1 result" if there is 1, and "%d results" for
+// any other number. Catalog is agnostic to the kind of format strings that are
+// used: for instance, messages can follow either the printf-style substitution
+// from package fmt or use templates.
+//
+// A Message does not substitute arguments in the format string. This job is
+// reserved for packages that render strings, such as message, that use
+// Catalogs to select strings. This separation of concerns allows Catalog to
+// be used to store any kind of formatting strings.
+//
+// # Selecting messages based on linguistic features of substitution arguments
+//
+// Messages may vary based on any linguistic features of the argument values.
+// The most common one is plural form, but others exist.
+//
+// Selection messages are provided in packages that provide support for a
+// specific linguistic feature. The following snippet uses plural.Selectf:
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// plural.Selectf(1, "",
+// plural.One, "You are 1 minute late.",
+// plural.Other, "You are %d minutes late."))
+//
+// In this example, a message is stored in the Catalog where one of two messages
+// is selected based on the first argument, a number. The first message is
+// selected if the argument is singular (identified by the selector "one") and
+// the second message is selected in all other cases. The selectors are defined
+// by the plural rules defined in CLDR. The selector "other" is special and will
+// always match. Each language always defines one of the linguistic categories
+// to be "other." For English, singular is "one" and plural is "other".
+//
+// Selects can be nested. This allows selecting sentences based on features of
+// multiple arguments or multiple linguistic properties of a single argument.
+//
+// # String interpolation
+//
+// There is often a lot of commonality between the possible variants of a
+// message. For instance, in the example above the word "minute" varies based on
+// the plural category of the argument, but the rest of the sentence is
+// identical. Using interpolation the above message can be rewritten as:
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes",
+// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")),
+// catalog.String("You are %[1]d ${minutes} late."))
+//
+// Var is defined to return the variable name if the message does not yield a
+// match. This allows us to further simplify this snippet to
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")),
+// catalog.String("You are %d ${minutes} late."))
+//
+// Overall this is still only a minor improvement, but things can get a lot more
+// unwieldy if more than one linguistic feature is used to determine a message
+// variant. Consider the following example:
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.",
+// catalog.Var("their",
+// plural.Selectf(1, ""
+// plural.One, gender.Select(1, "female", "her", "other", "his"))),
+// catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite"))
+// catalog.String("%[1]v ${invites} %[2]v to ${their} party.")),
+//
+// Without variable substitution, this would have to be written as
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.",
+// plural.Selectf(1, "",
+// plural.One, gender.Select(1,
+// "female", "%[1]v invites %[2]v to her party."
+// "other", "%[1]v invites %[2]v to his party."),
+// plural.Other, "%[1]v invites %[2]v to their party."))
+//
+// Not necessarily shorter, but using variables there is less duplication and
+// the messages are more maintenance friendly. Moreover, languages may have up
+// to six plural forms. This makes the use of variables more welcome.
+//
+// Different messages using the same inflections can reuse variables by moving
+// them to macros. Using macros we can rewrite the message as:
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.",
+// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.")
+//
+// Where the following macros were defined separately.
+//
+// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "",
+// plural.One, "invite"))
+// catalog.SetMacro(language.English, "their", plural.Selectf(1, "",
+// plural.One, gender.Select(1, "female", "her", "other", "his")))
+//
+// Placeholders invoke a macro by following its name with the arguments in parentheses.
+//
+// # Looking up messages
+//
+// Message lookup using Catalogs is typically only done by specialized packages
+// and is not something the user should be concerned with. For instance, to
+// express the tardiness of a user using the related message we defined earlier,
+// the user may use the package message like so:
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("You are %d minute(s) late.", 5)
+//
+// Which would print:
+//
+// You are 5 minutes late.
+//
+// This package is UNDER CONSTRUCTION and its API may change.
+package catalog // import "golang.org/x/text/message/catalog"
+
+// TODO:
+// Some way to freeze a catalog.
+// - Locking on each lookup turns out to be about 50% of the total running time
+// for some of the benchmarks in the message package.
+// Consider these:
+// - Sequence type to support sequences in user-defined messages.
+// - Garbage collection: Remove dictionaries that can no longer be reached
+// as other dictionaries have been added that cover all possible keys.
+
+import (
+ "errors"
+ "fmt"
+
+ "golang.org/x/text/internal"
+
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/language"
+)
+
+// A Catalog allows lookup of translated messages.
+type Catalog interface {
+ // Languages returns all languages for which the Catalog contains variants.
+ Languages() []language.Tag
+
+ // Matcher returns a Matcher for languages from this Catalog.
+ Matcher() language.Matcher
+
+ // A Context is used for evaluating Messages.
+ Context(tag language.Tag, r catmsg.Renderer) *Context
+
+ // This unexported method prevents Catalog from being implemented outside this package.
+ lookup(tag language.Tag, key string) (data string, ok bool)
+}
+
+// NewFromMap creates a Catalog from the given map. If a Dictionary is
+// underspecified the entry is retrieved from a parent language.
+func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) {
+ options := options{}
+ for _, o := range opts {
+ o(&options)
+ }
+ c := &catalog{
+ dicts: map[language.Tag]Dictionary{},
+ }
+ _, hasFallback := dictionaries[options.fallback.String()]
+ if hasFallback {
+ // TODO: Should it be okay to not have a fallback language?
+ // Catalog generators could enforce there is always a fallback.
+ c.langs = append(c.langs, options.fallback)
+ }
+ for lang, dict := range dictionaries {
+ tag, err := language.Parse(lang)
+ if err != nil {
+ return nil, fmt.Errorf("catalog: invalid language tag %q", lang)
+ }
+ if _, ok := c.dicts[tag]; ok {
+ return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag)
+ }
+ c.dicts[tag] = dict
+ if !hasFallback || tag != options.fallback {
+ c.langs = append(c.langs, tag)
+ }
+ }
+ if hasFallback {
+ internal.SortTags(c.langs[1:])
+ } else {
+ internal.SortTags(c.langs)
+ }
+ c.matcher = language.NewMatcher(c.langs)
+ return c, nil
+}
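+
+// A hedged sketch of a map-backed Dictionary for NewFromMap. It assumes the
+// stored data is already in the compiled form expected by the decoder; the
+// plain strings below are for illustration only.
+//
+// type mapDict map[string]string
+//
+// func (d mapDict) Lookup(key string) (data string, ok bool) {
+// data, ok = d[key]
+// return data, ok
+// }
+//
+// cat, err := NewFromMap(map[string]Dictionary{
+// "en": mapDict{"Hello": "Hello"},
+// "nl": mapDict{"Hello": "Hallo"},
+// })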
+
+// A Dictionary is a source of translations for a single language.
+type Dictionary interface {
+ // Lookup returns a message compiled with catmsg.Compile for the given key.
+ // It returns false for ok if such a message could not be found.
+ Lookup(key string) (data string, ok bool)
+}
+
+type catalog struct {
+ langs []language.Tag
+ dicts map[language.Tag]Dictionary
+ macros store
+ matcher language.Matcher
+}
+
+func (c *catalog) Languages() []language.Tag { return c.langs }
+func (c *catalog) Matcher() language.Matcher { return c.matcher }
+
+func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) {
+ for ; ; tag = tag.Parent() {
+ if dict, ok := c.dicts[tag]; ok {
+ if data, ok := dict.Lookup(key); ok {
+ return data, true
+ }
+ }
+ if tag == language.Und {
+ break
+ }
+ }
+ return "", false
+}
+
+// Context returns a Context for formatting messages.
+// Only one Message may be formatted per context at any given time.
+func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context {
+ return &Context{
+ cat: c,
+ tag: tag,
+ dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}),
+ }
+}
+
+// A Builder allows building a Catalog programmatically.
+type Builder struct {
+ options
+ matcher language.Matcher
+
+ index store
+ macros store
+}
+
+type options struct {
+ fallback language.Tag
+}
+
+// An Option configures Catalog behavior.
+type Option func(*options)
+
+// Fallback specifies the default fallback language. The default is Und.
+func Fallback(tag language.Tag) Option {
+ return func(o *options) { o.fallback = tag }
+}
+
+// TODO:
+// // Catalogs specifies one or more sources for a Catalog.
+// // Lookups are in order.
+// // This can be changed by inserting into the chain a Catalog that
+// // implements Loader and is used for setting.
+// func Catalogs(d ...Loader) Option {
+// return nil
+// }
+//
+// func Delims(start, end string) Option {}
+//
+// func Dict(tag language.Tag, d ...Dictionary) Option
+
+// NewBuilder returns an empty mutable Catalog.
+func NewBuilder(opts ...Option) *Builder {
+ c := &Builder{}
+ for _, o := range opts {
+ o(&c.options)
+ }
+ return c
+}
+
+// SetString is shorthand for Set(tag, key, String(msg)).
+func (c *Builder) SetString(tag language.Tag, key string, msg string) error {
+ return c.set(tag, key, &c.index, String(msg))
+}
+
+// Set sets the translation for the given language and key.
+//
+// When evaluating this message, the first Message in msgs that evaluates to
+// a string is the message returned.
+func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error {
+ return c.set(tag, key, &c.index, msg...)
+}
+
+// SetMacro defines a Message that may be substituted in another message.
+// The arguments to a macro Message are passed as arguments in the
+// placeholder of the form "${foo(arg1, arg2)}".
+func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error {
+ return c.set(tag, name, &c.macros, msg...)
+}
+
+// ErrNotFound indicates there was no message for the given key.
+var ErrNotFound = errors.New("catalog: message not found")
+
+// String specifies a plain message string. It can be used as fallback if no
+// other strings match or as a simple standalone message.
+//
+// It is an error to pass more than one String in a message sequence.
+func String(name string) Message {
+ return catmsg.String(name)
+}
+
+// Var sets a variable that may be substituted in formatting patterns using
+// named substitution of the form "${name}". The name argument is used as a
+// fallback if the statements do not produce a match. The statement sequence may
+// not contain any Var calls.
+//
+// The name passed to a Var must be unique within a message sequence.
+func Var(name string, msg ...Message) Message {
+ return &catmsg.Var{Name: name, Message: firstInSequence(msg)}
+}
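+
+// A hedged sketch (not part of the upstream source) combining Var with a
+// pattern that substitutes it:
+//
+// b.Set(language.English, "request",
+// Var("thing", String("coffee")),
+// String("Could I have some ${thing}, please?"))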
+
+// Context returns a Context for formatting messages.
+// Only one Message may be formatted per context at any given time.
+func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context {
+ return &Context{
+ cat: b,
+ tag: tag,
+ dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}),
+ }
+}
+
+// A Context is used for evaluating Messages.
+// Only one Message may be formatted per context at any given time.
+type Context struct {
+ cat Catalog
+ tag language.Tag // TODO: use compact index.
+ dec *catmsg.Decoder
+}
+
+// Execute looks up and executes the message with the given key.
+// It returns ErrNotFound if no message could be found in the index.
+func (c *Context) Execute(key string) error {
+ data, ok := c.cat.lookup(c.tag, key)
+ if !ok {
+ return ErrNotFound
+ }
+ return c.dec.Execute(data)
+}
diff --git a/vendor/golang.org/x/text/message/catalog/dict.go b/vendor/golang.org/x/text/message/catalog/dict.go
new file mode 100644
index 000000000..a0eb81810
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/dict.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catalog
+
+import (
+ "sync"
+
+ "golang.org/x/text/internal"
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/language"
+)
+
+// TODO:
+// Dictionary returns a Dictionary that returns the first Message, using the
+// given language tag, that matches:
+// 1. the last one registered by one of the Set methods
+// 2. returned by one of the Loaders
+// 3. repeat from 1. using the parent language
+// This approach allows messages to be underspecified.
+// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) {
+// // TODO: verify dictionary exists.
+// return &dict{&c.index, tag}, nil
+// }
+
+type dict struct {
+ s *store
+ tag language.Tag // TODO: make compact tag.
+}
+
+func (d *dict) Lookup(key string) (data string, ok bool) {
+ return d.s.lookup(d.tag, key)
+}
+
+func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) {
+ return b.index.lookup(tag, key)
+}
+
+func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error {
+ data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, firstInSequence(msg))
+
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ m := s.index[tag]
+ if m == nil {
+ m = msgMap{}
+ if s.index == nil {
+ s.index = map[language.Tag]msgMap{}
+ }
+ c.matcher = nil
+ s.index[tag] = m
+ }
+
+ m[key] = data
+ return err
+}
+
+func (c *Builder) Matcher() language.Matcher {
+ c.index.mutex.RLock()
+ m := c.matcher
+ c.index.mutex.RUnlock()
+ if m != nil {
+ return m
+ }
+
+ c.index.mutex.Lock()
+ if c.matcher == nil {
+ c.matcher = language.NewMatcher(c.unlockedLanguages())
+ }
+ m = c.matcher
+ c.index.mutex.Unlock()
+ return m
+}
+
+type store struct {
+ mutex sync.RWMutex
+ index map[language.Tag]msgMap
+}
+
+type msgMap map[string]string
+
+func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ for ; ; tag = tag.Parent() {
+ if msgs, ok := s.index[tag]; ok {
+ if msg, ok := msgs[key]; ok {
+ return msg, true
+ }
+ }
+ if tag == language.Und {
+ break
+ }
+ }
+ return "", false
+}
+
+// Languages returns all languages for which the Catalog contains variants.
+func (b *Builder) Languages() []language.Tag {
+ s := &b.index
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ return b.unlockedLanguages()
+}
+
+func (b *Builder) unlockedLanguages() []language.Tag {
+ s := &b.index
+ if len(s.index) == 0 {
+ return nil
+ }
+ tags := make([]language.Tag, 0, len(s.index))
+ _, hasFallback := s.index[b.options.fallback]
+ offset := 0
+ if hasFallback {
+ tags = append(tags, b.options.fallback)
+ offset = 1
+ }
+ for t := range s.index {
+ if t != b.options.fallback {
+ tags = append(tags, t)
+ }
+ }
+ internal.SortTags(tags[offset:])
+ return tags
+}
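+
+// Illustrative note (not in the upstream source): with dictionaries for de,
+// en and fr and Fallback(language.English), the result is [en de fr]: the
+// fallback first, then the remaining tags in sorted order.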
diff --git a/vendor/golang.org/x/text/message/catalog/go19.go b/vendor/golang.org/x/text/message/catalog/go19.go
new file mode 100644
index 000000000..291a4df94
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/go19.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+
+package catalog
+
+import "golang.org/x/text/internal/catmsg"
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message = catmsg.Message
+
+type firstInSequence = catmsg.FirstOf
diff --git a/vendor/golang.org/x/text/message/catalog/gopre19.go b/vendor/golang.org/x/text/message/catalog/gopre19.go
new file mode 100644
index 000000000..da44ebb8b
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/gopre19.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+
+package catalog
+
+import "golang.org/x/text/internal/catmsg"
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message interface {
+ catmsg.Message
+}
+
+func firstInSequence(m []Message) catmsg.Message {
+ a := []catmsg.Message{}
+ for _, m := range m {
+ a = append(a, m)
+ }
+ return catmsg.FirstOf(a)
+}
diff --git a/vendor/golang.org/x/text/message/doc.go b/vendor/golang.org/x/text/message/doc.go
new file mode 100644
index 000000000..4bf7bdcac
--- /dev/null
+++ b/vendor/golang.org/x/text/message/doc.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message implements formatted I/O for localized strings with functions
+// analogous to fmt's print functions. It is a drop-in replacement for fmt.
+//
+// # Localized Formatting
+//
+// A format string can be localized by replacing any of the print functions of
+// fmt with an equivalent call to a Printer.
+//
+// p := message.NewPrinter(message.MatchLanguage("en"))
+// p.Println(123456.78) // Prints 123,456.78
+//
+// p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row
+//
+// p := message.NewPrinter(message.MatchLanguage("nl"))
+// p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1,244.9 meter
+//
+// p := message.NewPrinter(message.MatchLanguage("bn"))
+// p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮
+//
+// Printer currently supports numbers and specialized types for which packages
+// exist in x/text. Other builtin types such as time.Time and slices are
+// planned.
+//
+// Format strings largely have the same meaning as with fmt with the following
+// notable exceptions:
+// - flag # always resorts to fmt for printing
+// - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is
+// specified.
+// - verb 'm' inserts a translation of a string argument.
+//
+// See package fmt for more options.
+//
+// # Translation
+//
+// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf
+// are used as keys to look up translations for the specified languages.
+// More on how these need to be specified below.
+//
+// One can use arbitrary keys to distinguish between otherwise ambiguous
+// strings:
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("archive(noun)") // Prints "archive"
+// p.Printf("archive(verb)") // Prints "archive"
+//
+// p := message.NewPrinter(language.German)
+// p.Printf("archive(noun)") // Prints "Archiv"
+// p.Printf("archive(verb)") // Prints "archivieren"
+//
+// To retain the fallback functionality, use Key:
+//
+// p.Printf(message.Key("archive(noun)", "archive"))
+// p.Printf(message.Key("archive(verb)", "archive"))
+//
+// # Translation Pipeline
+//
+// Format strings that contain text need to be translated to support different
+// locales. The first step is to extract strings that need to be translated.
+//
+// 1. Install gotext
+//
+// go get -u golang.org/x/text/cmd/gotext
+// gotext -help
+//
+// 2. Mark strings in your source to be translated by using message.Printer,
+// instead of the functions of the fmt package.
+//
+// 3. Extract the strings from your source
+//
+// gotext extract
+//
+// The output will be written to the textdata directory.
+//
+// 4. Send the files for translation
+//
+// It is planned to support multiple formats, but for now one will have to
+// rewrite the JSON output to the desired format.
+//
+// 5. Inject translations into program
+//
+// 6. Repeat from 2
+//
+// Right now this has to be done programmatically with calls to Set or
+// SetString. These functions, as well as the methods defined in package
+// golang.org/x/text/message/catalog, can be used to implement either
+// dynamic or static loading of messages.
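+//
+// For example, to inject a translation at startup (illustrative):
+//
+// message.SetString(language.German, "Hello!", "Hallo!")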
+//
+// # Plural and Gender Forms
+//
+// Translated messages can vary based on the plural and gender forms of
+// substitution values. In general, it is up to the translators to provide
+// alternative translations for such forms. See the packages in
+// golang.org/x/text/feature and golang.org/x/text/message/catalog for more
+// information.
+package message
diff --git a/vendor/golang.org/x/text/message/format.go b/vendor/golang.org/x/text/message/format.go
new file mode 100644
index 000000000..a47d17dd4
--- /dev/null
+++ b/vendor/golang.org/x/text/message/format.go
@@ -0,0 +1,510 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+ "bytes"
+ "strconv"
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/format"
+)
+
+const (
+ ldigits = "0123456789abcdefx"
+ udigits = "0123456789ABCDEFX"
+)
+
+const (
+ signed = true
+ unsigned = false
+)
+
+// A formatInfo is the raw formatter used by Printf etc.
+// It prints into a buffer that must be set up separately.
+type formatInfo struct {
+ buf *bytes.Buffer
+
+ format.Parser
+
+ // intbuf is large enough to store %b of an int64 with a sign and
+ // avoids padding at the end of the struct on 32 bit architectures.
+ intbuf [68]byte
+}
+
+func (f *formatInfo) init(buf *bytes.Buffer) {
+ f.ClearFlags()
+ f.buf = buf
+}
+
+// writePadding generates n bytes of padding.
+func (f *formatInfo) writePadding(n int) {
+ if n <= 0 { // No padding bytes needed.
+ return
+ }
+ f.buf.Grow(n)
+ // Decide which byte the padding should be filled with.
+ padByte := byte(' ')
+ if f.Zero {
+ padByte = byte('0')
+ }
+ // Fill padding with padByte.
+ for i := 0; i < n; i++ {
+ f.buf.WriteByte(padByte) // TODO: make more efficient.
+ }
+}
+
+// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) pad(b []byte) {
+ if !f.WidthPresent || f.Width == 0 {
+ f.buf.Write(b)
+ return
+ }
+ width := f.Width - utf8.RuneCount(b)
+ if !f.Minus {
+ // left padding
+ f.writePadding(width)
+ f.buf.Write(b)
+ } else {
+ // right padding
+ f.buf.Write(b)
+ f.writePadding(width)
+ }
+}
+
+// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) padString(s string) {
+ if !f.WidthPresent || f.Width == 0 {
+ f.buf.WriteString(s)
+ return
+ }
+ width := f.Width - utf8.RuneCountInString(s)
+ if !f.Minus {
+ // left padding
+ f.writePadding(width)
+ f.buf.WriteString(s)
+ } else {
+ // right padding
+ f.buf.WriteString(s)
+ f.writePadding(width)
+ }
+}
+
+// fmt_boolean formats a boolean.
+func (f *formatInfo) fmt_boolean(v bool) {
+ if v {
+ f.padString("true")
+ } else {
+ f.padString("false")
+ }
+}
+
+// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'".
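+// For example, 0x2603 with the sharp flag set renders as "U+2603 '☃'".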
+func (f *formatInfo) fmt_unicode(u uint64) {
+ buf := f.intbuf[0:]
+
+ // With default precision set the maximum needed buf length is 18
+ // for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits
+ // into the already allocated intbuf with a capacity of 68 bytes.
+ prec := 4
+ if f.PrecPresent && f.Prec > 4 {
+ prec = f.Prec
+ // Compute space needed for "U+", number, " '", character, "'".
+ width := 2 + prec + 2 + utf8.UTFMax + 1
+ if width > len(buf) {
+ buf = make([]byte, width)
+ }
+ }
+
+ // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left.
+ i := len(buf)
+
+ // For %#U we want to add a space and a quoted character at the end of the buffer.
+ if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) {
+ i--
+ buf[i] = '\''
+ i -= utf8.RuneLen(rune(u))
+ utf8.EncodeRune(buf[i:], rune(u))
+ i--
+ buf[i] = '\''
+ i--
+ buf[i] = ' '
+ }
+ // Format the Unicode code point u as a hexadecimal number.
+ for u >= 16 {
+ i--
+ buf[i] = udigits[u&0xF]
+ prec--
+ u >>= 4
+ }
+ i--
+ buf[i] = udigits[u]
+ prec--
+ // Add zeros in front of the number until requested precision is reached.
+ for prec > 0 {
+ i--
+ buf[i] = '0'
+ prec--
+ }
+ // Add a leading "U+".
+ i--
+ buf[i] = '+'
+ i--
+ buf[i] = 'U'
+
+ oldZero := f.Zero
+ f.Zero = false
+ f.pad(buf[i:])
+ f.Zero = oldZero
+}
+
+// fmt_integer formats signed and unsigned integers.
+func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) {
+ negative := isSigned && int64(u) < 0
+ if negative {
+ u = -u
+ }
+
+ buf := f.intbuf[0:]
+ // The already allocated f.intbuf with a capacity of 68 bytes
+ // is large enough for integer formatting when no precision or width is set.
+ if f.WidthPresent || f.PrecPresent {
+ // Account 3 extra bytes for possible addition of a sign and "0x".
+ width := 3 + f.Width + f.Prec // wid and prec are always positive.
+ if width > len(buf) {
+ // We're going to need a bigger boat.
+ buf = make([]byte, width)
+ }
+ }
+
+ // Two ways to ask for extra leading zero digits: %.3d or %03d.
+ // If both are specified the f.zero flag is ignored and
+ // padding with spaces is used instead.
+ prec := 0
+ if f.PrecPresent {
+ prec = f.Prec
+ // Precision of 0 and value of 0 means "print nothing" but padding.
+ if prec == 0 && u == 0 {
+ oldZero := f.Zero
+ f.Zero = false
+ f.writePadding(f.Width)
+ f.Zero = oldZero
+ return
+ }
+ } else if f.Zero && f.WidthPresent {
+ prec = f.Width
+ if negative || f.Plus || f.Space {
+ prec-- // leave room for sign
+ }
+ }
+
+ // Because printing is easier right-to-left: format u into buf, ending at buf[i].
+ // We could make things marginally faster by splitting the 32-bit case out
+ // into a separate block but it's not worth the duplication, so u has 64 bits.
+ i := len(buf)
+ // Use constants for the division and modulo for more efficient code.
+ // Switch cases ordered by popularity.
+ switch base {
+ case 10:
+ for u >= 10 {
+ i--
+ next := u / 10
+ buf[i] = byte('0' + u - next*10)
+ u = next
+ }
+ case 16:
+ for u >= 16 {
+ i--
+ buf[i] = digits[u&0xF]
+ u >>= 4
+ }
+ case 8:
+ for u >= 8 {
+ i--
+ buf[i] = byte('0' + u&7)
+ u >>= 3
+ }
+ case 2:
+ for u >= 2 {
+ i--
+ buf[i] = byte('0' + u&1)
+ u >>= 1
+ }
+ default:
+ panic("fmt: unknown base; can't happen")
+ }
+ i--
+ buf[i] = digits[u]
+ for i > 0 && prec > len(buf)-i {
+ i--
+ buf[i] = '0'
+ }
+
+ // Various prefixes: 0x, -, etc.
+ if f.Sharp {
+ switch base {
+ case 8:
+ if buf[i] != '0' {
+ i--
+ buf[i] = '0'
+ }
+ case 16:
+ // Add a leading 0x or 0X.
+ i--
+ buf[i] = digits[16]
+ i--
+ buf[i] = '0'
+ }
+ }
+
+ if negative {
+ i--
+ buf[i] = '-'
+ } else if f.Plus {
+ i--
+ buf[i] = '+'
+ } else if f.Space {
+ i--
+ buf[i] = ' '
+ }
+
+ // Left padding with zeros has already been handled like precision earlier
+ // or the f.zero flag is ignored due to an explicitly set precision.
+ oldZero := f.Zero
+ f.Zero = false
+ f.pad(buf[i:])
+ f.Zero = oldZero
+}
+
+// truncate truncates the string to the specified precision, if present.
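+// The precision is counted in runes, not bytes.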
+func (f *formatInfo) truncate(s string) string {
+ if f.PrecPresent {
+ n := f.Prec
+ for i := range s {
+ n--
+ if n < 0 {
+ return s[:i]
+ }
+ }
+ }
+ return s
+}
+
+// fmt_s formats a string.
+func (f *formatInfo) fmt_s(s string) {
+ s = f.truncate(s)
+ f.padString(s)
+}
+
+// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) {
+ length := len(b)
+ if b == nil {
+ // No byte slice present. Assume string s should be encoded.
+ length = len(s)
+ }
+ // Set length to not process more bytes than the precision demands.
+ if f.PrecPresent && f.Prec < length {
+ length = f.Prec
+ }
+ // Compute width of the encoding taking into account the f.sharp and f.space flag.
+ width := 2 * length
+ if width > 0 {
+ if f.Space {
+ // Each element encoded by two hexadecimals will get a leading 0x or 0X.
+ if f.Sharp {
+ width *= 2
+ }
+ // Elements will be separated by a space.
+ width += length - 1
+ } else if f.Sharp {
+ // Only a leading 0x or 0X will be added for the whole string.
+ width += 2
+ }
+ } else { // The byte slice or string that should be encoded is empty.
+ if f.WidthPresent {
+ f.writePadding(f.Width)
+ }
+ return
+ }
+ // Handle padding to the left.
+ if f.WidthPresent && f.Width > width && !f.Minus {
+ f.writePadding(f.Width - width)
+ }
+ // Write the encoding directly into the output buffer.
+ buf := f.buf
+ if f.Sharp {
+ // Add leading 0x or 0X.
+ buf.WriteByte('0')
+ buf.WriteByte(digits[16])
+ }
+ var c byte
+ for i := 0; i < length; i++ {
+ if f.Space && i > 0 {
+ // Separate elements with a space.
+ buf.WriteByte(' ')
+ if f.Sharp {
+ // Add leading 0x or 0X for each element.
+ buf.WriteByte('0')
+ buf.WriteByte(digits[16])
+ }
+ }
+ if b != nil {
+ c = b[i] // Take a byte from the input byte slice.
+ } else {
+ c = s[i] // Take a byte from the input string.
+ }
+ // Encode each byte as two hexadecimal digits.
+ buf.WriteByte(digits[c>>4])
+ buf.WriteByte(digits[c&0xF])
+ }
+ // Handle padding to the right.
+ if f.WidthPresent && f.Width > width && f.Minus {
+ f.writePadding(f.Width - width)
+ }
+}
+
+// fmt_sx formats a string as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_sx(s, digits string) {
+ f.fmt_sbx(s, nil, digits)
+}
+
+// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_bx(b []byte, digits string) {
+ f.fmt_sbx("", b, digits)
+}
+
+// fmt_q formats a string as a double-quoted, escaped Go string constant.
+// If f.sharp is set a raw (backquoted) string may be returned instead
+// if the string does not contain any control characters other than tab.
+func (f *formatInfo) fmt_q(s string) {
+ s = f.truncate(s)
+ if f.Sharp && strconv.CanBackquote(s) {
+ f.padString("`" + s + "`")
+ return
+ }
+ buf := f.intbuf[:0]
+ if f.Plus {
+ f.pad(strconv.AppendQuoteToASCII(buf, s))
+ } else {
+ f.pad(strconv.AppendQuote(buf, s))
+ }
+}
+
+// fmt_c formats an integer as a Unicode character.
+// If the character is not valid Unicode, it will print '\ufffd'.
+func (f *formatInfo) fmt_c(c uint64) {
+ r := rune(c)
+ if c > utf8.MaxRune {
+ r = utf8.RuneError
+ }
+ buf := f.intbuf[:0]
+ w := utf8.EncodeRune(buf[:utf8.UTFMax], r)
+ f.pad(buf[:w])
+}
+
+// fmt_qc formats an integer as a single-quoted, escaped Go character constant.
+// If the character is not valid Unicode, it will print '\ufffd'.
+func (f *formatInfo) fmt_qc(c uint64) {
+ r := rune(c)
+ if c > utf8.MaxRune {
+ r = utf8.RuneError
+ }
+ buf := f.intbuf[:0]
+ if f.Plus {
+ f.pad(strconv.AppendQuoteRuneToASCII(buf, r))
+ } else {
+ f.pad(strconv.AppendQuoteRune(buf, r))
+ }
+}
+
+// fmt_float formats a float64. It assumes that verb is a valid format specifier
+// for strconv.AppendFloat and therefore fits into a byte.
+func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) {
+ // Explicit precision in format specifier overrules default precision.
+ if f.PrecPresent {
+ prec = f.Prec
+ }
+ // Format number, reserving space for leading + sign if needed.
+ num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size)
+ if num[1] == '-' || num[1] == '+' {
+ num = num[1:]
+ } else {
+ num[0] = '+'
+ }
+ // f.space means to add a leading space instead of a "+" sign unless
+ // the sign is explicitly asked for by f.plus.
+ if f.Space && num[0] == '+' && !f.Plus {
+ num[0] = ' '
+ }
+ // Special handling for infinities and NaN,
+ // which don't look like a number so shouldn't be padded with zeros.
+ if num[1] == 'I' || num[1] == 'N' {
+ oldZero := f.Zero
+ f.Zero = false
+ // Remove sign before NaN if not asked for.
+ if num[1] == 'N' && !f.Space && !f.Plus {
+ num = num[1:]
+ }
+ f.pad(num)
+ f.Zero = oldZero
+ return
+ }
+ // The sharp flag forces printing a decimal point for non-binary formats
+ // and retains trailing zeros, which we may need to restore.
+ if f.Sharp && verb != 'b' {
+ digits := 0
+ switch verb {
+ case 'v', 'g', 'G':
+ digits = prec
+ // If no precision is set explicitly use a precision of 6.
+ if digits == -1 {
+ digits = 6
+ }
+ }
+
+ // Buffer pre-allocated with enough room for
+ // exponent notations of the form "e+123".
+ var tailBuf [5]byte
+ tail := tailBuf[:0]
+
+ hasDecimalPoint := false
+ // Starting from i = 1 to skip sign at num[0].
+ for i := 1; i < len(num); i++ {
+ switch num[i] {
+ case '.':
+ hasDecimalPoint = true
+ case 'e', 'E':
+ tail = append(tail, num[i:]...)
+ num = num[:i]
+ default:
+ digits--
+ }
+ }
+ if !hasDecimalPoint {
+ num = append(num, '.')
+ }
+ for digits > 0 {
+ num = append(num, '0')
+ digits--
+ }
+ num = append(num, tail...)
+ }
+ // We want a sign if asked for and if the sign is not positive.
+ if f.Plus || num[0] != '+' {
+ // If we're zero padding to the left we want the sign before the leading zeros.
+ // Achieve this by writing the sign out and then padding the unsigned number.
+ if f.Zero && f.WidthPresent && f.Width > len(num) {
+ f.buf.WriteByte(num[0])
+ f.writePadding(f.Width - len(num))
+ f.buf.Write(num[1:])
+ return
+ }
+ f.pad(num)
+ return
+ }
+ // No sign to show and the number is positive; just print the unsigned number.
+ f.pad(num[1:])
+}
diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go
new file mode 100644
index 000000000..91a972642
--- /dev/null
+++ b/vendor/golang.org/x/text/message/message.go
@@ -0,0 +1,192 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message // import "golang.org/x/text/message"
+
+import (
+ "io"
+ "os"
+
+ // Include features to facilitate generated catalogs.
+ _ "golang.org/x/text/feature/plural"
+
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// A Printer implements language-specific formatted I/O analogous to the fmt
+// package.
+type Printer struct {
+ // the language
+ tag language.Tag
+
+ toDecimal number.Formatter
+ toScientific number.Formatter
+
+ cat catalog.Catalog
+}
+
+type options struct {
+ cat catalog.Catalog
+ // TODO:
+ // - allow %s to print integers in written form (tables are likely too large
+ // to enable this by default).
+ // - list behavior
+ //
+}
+
+// An Option defines an option of a Printer.
+type Option func(o *options)
+
+// Catalog defines the catalog to be used.
+func Catalog(c catalog.Catalog) Option {
+ return func(o *options) { o.cat = c }
+}
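+
+// A hedged usage sketch (not part of the upstream source): a *catalog.Builder
+// satisfies catalog.Catalog, so a custom catalog can be supplied as
+//
+// c := catalog.NewBuilder()
+// c.SetString(language.English, "greeting", "Hello!")
+// p := NewPrinter(language.English, Catalog(c))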
+
+// NewPrinter returns a Printer that formats messages tailored to language t.
+func NewPrinter(t language.Tag, opts ...Option) *Printer {
+ options := &options{
+ cat: DefaultCatalog,
+ }
+ for _, o := range opts {
+ o(options)
+ }
+ p := &Printer{
+ tag: t,
+ cat: options.cat,
+ }
+ p.toDecimal.InitDecimal(t)
+ p.toScientific.InitScientific(t)
+ return p
+}
+
+// Sprint is like fmt.Sprint, but using language-specific formatting.
+func (p *Printer) Sprint(a ...interface{}) string {
+ pp := newPrinter(p)
+ pp.doPrint(a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprint is like fmt.Fprint, but using language-specific formatting.
+func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ pp.doPrint(a)
+ n64, err := io.Copy(w, &pp.Buffer)
+ pp.free()
+ return int(n64), err
+}
+
+// Print is like fmt.Print, but using language-specific formatting.
+func (p *Printer) Print(a ...interface{}) (n int, err error) {
+ return p.Fprint(os.Stdout, a...)
+}
+
+// Sprintln is like fmt.Sprintln, but using language-specific formatting.
+func (p *Printer) Sprintln(a ...interface{}) string {
+ pp := newPrinter(p)
+ pp.doPrintln(a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprintln is like fmt.Fprintln, but using language-specific formatting.
+func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ pp.doPrintln(a)
+ n64, err := io.Copy(w, &pp.Buffer)
+ pp.free()
+ return int(n64), err
+}
+
+// Println is like fmt.Println, but using language-specific formatting.
+func (p *Printer) Println(a ...interface{}) (n int, err error) {
+ return p.Fprintln(os.Stdout, a...)
+}
+
+// Sprintf is like fmt.Sprintf, but using language-specific formatting.
+func (p *Printer) Sprintf(key Reference, a ...interface{}) string {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprintf is like fmt.Fprintf, but using language-specific formatting.
+func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ n, err = w.Write(pp.Bytes())
+ pp.free()
+ return n, err
+
+}
+
+// Printf is like fmt.Printf, but using language-specific formatting.
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ n, err = os.Stdout.Write(pp.Bytes())
+ pp.free()
+ return n, err
+}
+
+func lookupAndFormat(p *printer, r Reference, a []interface{}) {
+ p.fmt.Reset(a)
+ switch v := r.(type) {
+ case string:
+ if p.catContext.Execute(v) == catalog.ErrNotFound {
+ p.Render(v)
+ return
+ }
+ case key:
+ if p.catContext.Execute(v.id) == catalog.ErrNotFound &&
+ p.catContext.Execute(v.fallback) == catalog.ErrNotFound {
+ p.Render(v.fallback)
+ return
+ }
+ default:
+ panic("key argument is not a Reference")
+ }
+}
+
+type rawPrinter struct {
+ p *printer
+}
+
+func (p rawPrinter) Render(msg string) { p.p.WriteString(msg) }
+func (p rawPrinter) Arg(i int) interface{} { return nil }
+
+// Arg implements catmsg.Renderer.
+func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool
+ i--
+ if uint(i) < uint(len(p.fmt.Args)) {
+ return p.fmt.Args[i]
+ }
+ return nil
+}
+
+// Render implements catmsg.Renderer.
+func (p *printer) Render(msg string) {
+ p.doPrintf(msg)
+}
+
+// A Reference is a string or a message reference.
+type Reference interface {
+ // TODO: also allow []string
+}
+
+// Key creates a message Reference for a message where the given id is used for
+// message lookup and the fallback is returned when no matches are found.
+func Key(id string, fallback string) Reference {
+ return key{id, fallback}
+}
+
+type key struct {
+ id, fallback string
+}
diff --git a/vendor/golang.org/x/text/message/print.go b/vendor/golang.org/x/text/message/print.go
new file mode 100644
index 000000000..da304cc0e
--- /dev/null
+++ b/vendor/golang.org/x/text/message/print.go
@@ -0,0 +1,984 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+ "bytes"
+ "fmt" // TODO: consider copying interfaces from package fmt to avoid dependency.
+ "math"
+ "reflect"
+ "sync"
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/format"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// Strings for use with buffer.WriteString.
+// This is less overhead than using buffer.Write with byte arrays.
+const (
+ commaSpaceString = ", "
+ nilAngleString = "&lt;nil&gt;"
+ nilParenString = "(nil)"
+ nilString = "nil"
+ mapString = "map["
+ percentBangString = "%!"
+ missingString = "(MISSING)"
+ badIndexString = "(BADINDEX)"
+ panicString = "(PANIC="
+ extraString = "%!(EXTRA "
+ badWidthString = "%!(BADWIDTH)"
+ badPrecString = "%!(BADPREC)"
+ noVerbString = "%!(NOVERB)"
+
+ invReflectString = "&lt;invalid reflect.Value&gt;"
+)
+
+var printerPool = sync.Pool{
+ New: func() interface{} { return new(printer) },
+}
+
+// newPrinter allocates a new printer struct or grabs a cached one.
+func newPrinter(pp *Printer) *printer {
+ p := printerPool.Get().(*printer)
+ p.Printer = *pp
+ // TODO: cache most of the following call.
+ p.catContext = pp.cat.Context(pp.tag, p)
+
+ p.panicking = false
+ p.erroring = false
+ p.fmt.init(&p.Buffer)
+ return p
+}
+
+// free saves used printer structs in printerFree; avoids an allocation per invocation.
+func (p *printer) free() {
+ p.Buffer.Reset()
+ p.arg = nil
+ p.value = reflect.Value{}
+ printerPool.Put(p)
+}
+
+// printer is used to store a printer's state.
+// It implements "golang.org/x/text/internal/format".State.
+type printer struct {
+ Printer
+
+ // the context for looking up message translations
+ catContext *catalog.Context
+
+ // buffer for accumulating output.
+ bytes.Buffer
+
+ // arg holds the current item, as an interface{}.
+ arg interface{}
+ // value is used instead of arg for reflect values.
+ value reflect.Value
+
+ // fmt is used to format basic items such as integers or strings.
+ fmt formatInfo
+
+ // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion.
+ panicking bool
+ // erroring is set when printing an error string to guard against calling handleMethods.
+ erroring bool
+}
+
+// Language implements "golang.org/x/text/internal/format".State.
+func (p *printer) Language() language.Tag { return p.tag }
+
+func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent }
+
+func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent }
+
+func (p *printer) Flag(b int) bool {
+ switch b {
+ case '-':
+ return p.fmt.Minus
+ case '+':
+ return p.fmt.Plus || p.fmt.PlusV
+ case '#':
+ return p.fmt.Sharp || p.fmt.SharpV
+ case ' ':
+ return p.fmt.Space
+ case '0':
+ return p.fmt.Zero
+ }
+ return false
+}
+
+// getField gets the i'th field of the struct value.
+// If the field itself is an interface, return a value for
+// the thing inside the interface, not the interface itself.
+func getField(v reflect.Value, i int) reflect.Value {
+ val := v.Field(i)
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ val = val.Elem()
+ }
+ return val
+}
+
+func (p *printer) unknownType(v reflect.Value) {
+ if !v.IsValid() {
+ p.WriteString(nilAngleString)
+ return
+ }
+ p.WriteByte('?')
+ p.WriteString(v.Type().String())
+ p.WriteByte('?')
+}
+
+func (p *printer) badVerb(verb rune) {
+ p.erroring = true
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteByte('(')
+ switch {
+ case p.arg != nil:
+ p.WriteString(reflect.TypeOf(p.arg).String())
+ p.WriteByte('=')
+ p.printArg(p.arg, 'v')
+ case p.value.IsValid():
+ p.WriteString(p.value.Type().String())
+ p.WriteByte('=')
+ p.printValue(p.value, 'v', 0)
+ default:
+ p.WriteString(nilAngleString)
+ }
+ p.WriteByte(')')
+ p.erroring = false
+}
+
+func (p *printer) fmtBool(v bool, verb rune) {
+ switch verb {
+ case 't', 'v':
+ p.fmt.fmt_boolean(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or
+// not, as requested, by temporarily setting the sharp flag.
+func (p *printer) fmt0x64(v uint64, leading0x bool) {
+ sharp := p.fmt.Sharp
+ p.fmt.Sharp = leading0x
+ p.fmt.fmt_integer(v, 16, unsigned, ldigits)
+ p.fmt.Sharp = sharp
+}
+
+// fmtInteger formats a signed or unsigned integer.
+func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV && !isSigned {
+ p.fmt0x64(v, true)
+ return
+ }
+ fallthrough
+ case 'd':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_integer(v, 10, isSigned, ldigits)
+ } else {
+ p.fmtDecimalInt(v, isSigned)
+ }
+ case 'b':
+ p.fmt.fmt_integer(v, 2, isSigned, ldigits)
+ case 'o':
+ p.fmt.fmt_integer(v, 8, isSigned, ldigits)
+ case 'x':
+ p.fmt.fmt_integer(v, 16, isSigned, ldigits)
+ case 'X':
+ p.fmt.fmt_integer(v, 16, isSigned, udigits)
+ case 'c':
+ p.fmt.fmt_c(v)
+ case 'q':
+ if v <= utf8.MaxRune {
+ p.fmt.fmt_qc(v)
+ } else {
+ p.badVerb(verb)
+ }
+ case 'U':
+ p.fmt.fmt_unicode(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmtFloat formats a float. The default precision for each verb
+// is specified as last argument in the call to fmt_float.
+func (p *printer) fmtFloat(v float64, size int, verb rune) {
+ switch verb {
+ case 'b':
+ p.fmt.fmt_float(v, size, verb, -1)
+ case 'v':
+ verb = 'g'
+ fallthrough
+ case 'g', 'G':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, -1)
+ } else {
+ p.fmtVariableFloat(v, size)
+ }
+ case 'e', 'E':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, 6)
+ } else {
+ p.fmtScientific(v, size, 6)
+ }
+ case 'f', 'F':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, 6)
+ } else {
+ p.fmtDecimalFloat(v, size, 6)
+ }
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) setFlags(f *number.Formatter) {
+ f.Flags &^= number.ElideSign
+ if p.fmt.Plus || p.fmt.Space {
+ f.Flags |= number.AlwaysSign
+ if !p.fmt.Plus {
+ f.Flags |= number.ElideSign
+ }
+ } else {
+ f.Flags &^= number.AlwaysSign
+ }
+}
+
+func (p *printer) updatePadding(f *number.Formatter) {
+ f.Flags &^= number.PadMask
+ if p.fmt.Minus {
+ f.Flags |= number.PadAfterSuffix
+ } else {
+ f.Flags |= number.PadBeforePrefix
+ }
+ f.PadRune = ' '
+ f.FormatWidth = uint16(p.fmt.Width)
+}
+
+func (p *printer) initDecimal(minFrac, maxFrac int) {
+ f := &p.toDecimal
+ f.MinIntegerDigits = 1
+ f.MaxIntegerDigits = 0
+ f.MinFractionDigits = uint8(minFrac)
+ f.MaxFractionDigits = int16(maxFrac)
+ p.setFlags(f)
+ f.PadRune = 0
+ if p.fmt.WidthPresent {
+ if p.fmt.Zero {
+ wid := p.fmt.Width
+ // Use significant integers for this.
+ // TODO: this is not the same as width, but so be it.
+ if f.MinFractionDigits > 0 {
+ wid -= 1 + int(f.MinFractionDigits)
+ }
+ if p.fmt.Plus || p.fmt.Space {
+ wid--
+ }
+ if wid > 0 && wid > int(f.MinIntegerDigits) {
+ f.MinIntegerDigits = uint8(wid)
+ }
+ }
+ p.updatePadding(f)
+ }
+}
+
+func (p *printer) initScientific(minFrac, maxFrac int) {
+ f := &p.toScientific
+ if maxFrac < 0 {
+ f.SetPrecision(maxFrac)
+ } else {
+ f.SetPrecision(maxFrac + 1)
+ f.MinFractionDigits = uint8(minFrac)
+ f.MaxFractionDigits = int16(maxFrac)
+ }
+ f.MinExponentDigits = 2
+ p.setFlags(f)
+ f.PadRune = 0
+ if p.fmt.WidthPresent {
+ f.Flags &^= number.PadMask
+ if p.fmt.Zero {
+ f.PadRune = f.Digit(0)
+ f.Flags |= number.PadAfterPrefix
+ } else {
+ f.PadRune = ' '
+ f.Flags |= number.PadBeforePrefix
+ }
+ p.updatePadding(f)
+ }
+}
+
+func (p *printer) fmtDecimalInt(v uint64, isSigned bool) {
+ var d number.Decimal
+
+ f := &p.toDecimal
+ if p.fmt.PrecPresent {
+ p.setFlags(f)
+ f.MinIntegerDigits = uint8(p.fmt.Prec)
+ f.MaxIntegerDigits = 0
+ f.MinFractionDigits = 0
+ f.MaxFractionDigits = 0
+ if p.fmt.WidthPresent {
+ p.updatePadding(f)
+ }
+ } else {
+ p.initDecimal(0, 0)
+ }
+ d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+}
+
+func (p *printer) fmtDecimalFloat(v float64, size, prec int) {
+ var d number.Decimal
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ p.initDecimal(prec, prec)
+ d.ConvertFloat(p.toDecimal.RoundingContext, v, size)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+}
+
+func (p *printer) fmtVariableFloat(v float64, size int) {
+ prec := -1
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ var d number.Decimal
+ p.initScientific(0, prec)
+ d.ConvertFloat(p.toScientific.RoundingContext, v, size)
+
+ // Copy logic of 'g' formatting from strconv. It is simplified a bit as
+ // we don't have to mind having prec > len(d.Digits).
+ shortest := prec < 0
+ ePrec := prec
+ if shortest {
+ prec = len(d.Digits)
+ ePrec = 6
+ } else if prec == 0 {
+ prec = 1
+ ePrec = 1
+ }
+ exp := int(d.Exp) - 1
+ if exp < -4 || exp >= ePrec {
+ p.initScientific(0, prec)
+
+ out := p.toScientific.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+ } else {
+ if prec > int(d.Exp) {
+ prec = len(d.Digits)
+ }
+ if prec -= int(d.Exp); prec < 0 {
+ prec = 0
+ }
+ p.initDecimal(0, prec)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+ }
+}
+
+func (p *printer) fmtScientific(v float64, size, prec int) {
+ var d number.Decimal
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ p.initScientific(prec, prec)
+ rc := p.toScientific.RoundingContext
+ d.ConvertFloat(rc, v, size)
+
+ out := p.toScientific.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+
+}
+
+// fmtComplex formats a complex number v with
+// r = real(v) and j = imag(v) as (r+ji) using
+// fmtFloat for r and j formatting.
+func (p *printer) fmtComplex(v complex128, size int, verb rune) {
+ // Make sure any unsupported verbs are found before the
+ // calls to fmtFloat to not generate an incorrect error string.
+ switch verb {
+ case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E':
+ p.WriteByte('(')
+ p.fmtFloat(real(v), size/2, verb)
+ // Imaginary part always has a sign.
+ if math.IsNaN(imag(v)) {
+ // By CLDR's rules, NaNs do not use patterns or signs. As this code
+ // relies on AlwaysSign working for imaginary parts, we need to
+ // manually handle NaNs.
+ f := &p.toScientific
+ p.setFlags(f)
+ p.updatePadding(f)
+ p.setFlags(f)
+ nan := f.Symbol(number.SymNan)
+ extra := 0
+ if w, ok := p.Width(); ok {
+ extra = w - utf8.RuneCountInString(nan) - 1
+ }
+ if f.Flags&number.PadAfterNumber == 0 {
+ for ; extra > 0; extra-- {
+ p.WriteRune(f.PadRune)
+ }
+ }
+ p.WriteString(f.Symbol(number.SymPlusSign))
+ p.WriteString(nan)
+ for ; extra > 0; extra-- {
+ p.WriteRune(f.PadRune)
+ }
+ p.WriteString("i)")
+ return
+ }
+ oldPlus := p.fmt.Plus
+ p.fmt.Plus = true
+ p.fmtFloat(imag(v), size/2, verb)
+ p.WriteString("i)") // TODO: use symbol?
+ p.fmt.Plus = oldPlus
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) fmtString(v string, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV {
+ p.fmt.fmt_q(v)
+ } else {
+ p.fmt.fmt_s(v)
+ }
+ case 's':
+ p.fmt.fmt_s(v)
+ case 'x':
+ p.fmt.fmt_sx(v, ldigits)
+ case 'X':
+ p.fmt.fmt_sx(v, udigits)
+ case 'q':
+ p.fmt.fmt_q(v)
+ case 'm':
+ ctx := p.cat.Context(p.tag, rawPrinter{p})
+ if ctx.Execute(v) == catalog.ErrNotFound {
+ p.WriteString(v)
+ }
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) fmtBytes(v []byte, verb rune, typeString string) {
+ switch verb {
+ case 'v', 'd':
+ if p.fmt.SharpV {
+ p.WriteString(typeString)
+ if v == nil {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ for i, c := range v {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ p.fmt0x64(uint64(c), true)
+ }
+ p.WriteByte('}')
+ } else {
+ p.WriteByte('[')
+ for i, c := range v {
+ if i > 0 {
+ p.WriteByte(' ')
+ }
+ p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits)
+ }
+ p.WriteByte(']')
+ }
+ case 's':
+ p.fmt.fmt_s(string(v))
+ case 'x':
+ p.fmt.fmt_bx(v, ldigits)
+ case 'X':
+ p.fmt.fmt_bx(v, udigits)
+ case 'q':
+ p.fmt.fmt_q(string(v))
+ default:
+ p.printValue(reflect.ValueOf(v), verb, 0)
+ }
+}
+
+func (p *printer) fmtPointer(value reflect.Value, verb rune) {
+ var u uintptr
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ u = value.Pointer()
+ default:
+ p.badVerb(verb)
+ return
+ }
+
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV {
+ p.WriteByte('(')
+ p.WriteString(value.Type().String())
+ p.WriteString(")(")
+ if u == 0 {
+ p.WriteString(nilString)
+ } else {
+ p.fmt0x64(uint64(u), true)
+ }
+ p.WriteByte(')')
+ } else {
+ if u == 0 {
+ p.fmt.padString(nilAngleString)
+ } else {
+ p.fmt0x64(uint64(u), !p.fmt.Sharp)
+ }
+ }
+ case 'p':
+ p.fmt0x64(uint64(u), !p.fmt.Sharp)
+ case 'b', 'o', 'd', 'x', 'X':
+ if verb == 'd' {
+ p.fmt.Sharp = true // Print as standard go. TODO: does this make sense?
+ }
+ p.fmtInteger(uint64(u), unsigned, verb)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) catchPanic(arg interface{}, verb rune) {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "&lt;nil&gt;". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "&lt;nil&gt;" is a nice result.
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
+ p.WriteString(nilAngleString)
+ return
+ }
+ // Otherwise print a concise panic message. Most of the time the panic
+ // value will print itself nicely.
+ if p.panicking {
+ // Nested panics; the recursion in printArg cannot succeed.
+ panic(err)
+ }
+
+ oldFlags := p.fmt.Parser
+ // For this output we want default behavior.
+ p.fmt.ClearFlags()
+
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(panicString)
+ p.panicking = true
+ p.printArg(err, 'v')
+ p.panicking = false
+ p.WriteByte(')')
+
+ p.fmt.Parser = oldFlags
+ }
+}
+
+func (p *printer) handleMethods(verb rune) (handled bool) {
+ if p.erroring {
+ return
+ }
+ // Is it a Formatter?
+ if formatter, ok := p.arg.(format.Formatter); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ formatter.Format(p, verb)
+ return
+ }
+ if formatter, ok := p.arg.(fmt.Formatter); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ formatter.Format(p, verb)
+ return
+ }
+
+ // If we're doing Go syntax and the argument knows how to supply it, take care of it now.
+ if p.fmt.SharpV {
+ if stringer, ok := p.arg.(fmt.GoStringer); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ // Print the result of GoString unadorned.
+ p.fmt.fmt_s(stringer.GoString())
+ return
+ }
+ } else {
+ // If a string is acceptable according to the format, see if
+ // the value satisfies one of the string-valued interfaces.
+ // Println etc. set verb to %v, which is "stringable".
+ switch verb {
+ case 'v', 's', 'x', 'X', 'q':
+ // Is it an error or Stringer?
+ // The duplication in the bodies is necessary:
+ // setting handled and deferring catchPanic
+ // must happen before calling the method.
+ switch v := p.arg.(type) {
+ case error:
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ p.fmtString(v.Error(), verb)
+ return
+
+ case fmt.Stringer:
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ p.fmtString(v.String(), verb)
+ return
+ }
+ }
+ }
+ return false
+}
+
+func (p *printer) printArg(arg interface{}, verb rune) {
+ p.arg = arg
+ p.value = reflect.Value{}
+
+ if arg == nil {
+ switch verb {
+ case 'T', 'v':
+ p.fmt.padString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ return
+ }
+
+ // Special processing considerations.
+ // %T (the value's type) and %p (its address) are special; we always do them first.
+ switch verb {
+ case 'T':
+ p.fmt.fmt_s(reflect.TypeOf(arg).String())
+ return
+ case 'p':
+ p.fmtPointer(reflect.ValueOf(arg), 'p')
+ return
+ }
+
+ // Some types can be done without reflection.
+ switch f := arg.(type) {
+ case bool:
+ p.fmtBool(f, verb)
+ case float32:
+ p.fmtFloat(float64(f), 32, verb)
+ case float64:
+ p.fmtFloat(f, 64, verb)
+ case complex64:
+ p.fmtComplex(complex128(f), 64, verb)
+ case complex128:
+ p.fmtComplex(f, 128, verb)
+ case int:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int8:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int16:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int32:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int64:
+ p.fmtInteger(uint64(f), signed, verb)
+ case uint:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint8:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint16:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint32:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint64:
+ p.fmtInteger(f, unsigned, verb)
+ case uintptr:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case string:
+ p.fmtString(f, verb)
+ case []byte:
+ p.fmtBytes(f, verb, "[]byte")
+ case reflect.Value:
+ // Handle extractable values with special methods
+ // since printValue does not handle them at depth 0.
+ if f.IsValid() && f.CanInterface() {
+ p.arg = f.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.printValue(f, verb, 0)
+ default:
+ // If the type is not simple, it might have methods.
+ if !p.handleMethods(verb) {
+ // Need to use reflection, since the type had no
+ // interface methods that could be used for formatting.
+ p.printValue(reflect.ValueOf(f), verb, 0)
+ }
+ }
+}
+
+// printValue is similar to printArg but starts with a reflect value, not an interface{} value.
+// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg.
+func (p *printer) printValue(value reflect.Value, verb rune, depth int) {
+ // Handle values with special methods if not already handled by printArg (depth == 0).
+ if depth > 0 && value.IsValid() && value.CanInterface() {
+ p.arg = value.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.arg = nil
+ p.value = value
+
+ switch f := value; value.Kind() {
+ case reflect.Invalid:
+ if depth == 0 {
+ p.WriteString(invReflectString)
+ } else {
+ switch verb {
+ case 'v':
+ p.WriteString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ }
+ case reflect.Bool:
+ p.fmtBool(f.Bool(), verb)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.fmtInteger(uint64(f.Int()), signed, verb)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p.fmtInteger(f.Uint(), unsigned, verb)
+ case reflect.Float32:
+ p.fmtFloat(f.Float(), 32, verb)
+ case reflect.Float64:
+ p.fmtFloat(f.Float(), 64, verb)
+ case reflect.Complex64:
+ p.fmtComplex(f.Complex(), 64, verb)
+ case reflect.Complex128:
+ p.fmtComplex(f.Complex(), 128, verb)
+ case reflect.String:
+ p.fmtString(f.String(), verb)
+ case reflect.Map:
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ if f.IsNil() {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ } else {
+ p.WriteString(mapString)
+ }
+ keys := f.MapKeys()
+ for i, key := range keys {
+ if i > 0 {
+ if p.fmt.SharpV {
+ p.WriteString(commaSpaceString)
+ } else {
+ p.WriteByte(' ')
+ }
+ }
+ p.printValue(key, verb, depth+1)
+ p.WriteByte(':')
+ p.printValue(f.MapIndex(key), verb, depth+1)
+ }
+ if p.fmt.SharpV {
+ p.WriteByte('}')
+ } else {
+ p.WriteByte(']')
+ }
+ case reflect.Struct:
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ }
+ p.WriteByte('{')
+ for i := 0; i < f.NumField(); i++ {
+ if i > 0 {
+ if p.fmt.SharpV {
+ p.WriteString(commaSpaceString)
+ } else {
+ p.WriteByte(' ')
+ }
+ }
+ if p.fmt.PlusV || p.fmt.SharpV {
+ if name := f.Type().Field(i).Name; name != "" {
+ p.WriteString(name)
+ p.WriteByte(':')
+ }
+ }
+ p.printValue(getField(f, i), verb, depth+1)
+ }
+ p.WriteByte('}')
+ case reflect.Interface:
+ value := f.Elem()
+ if !value.IsValid() {
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ p.WriteString(nilParenString)
+ } else {
+ p.WriteString(nilAngleString)
+ }
+ } else {
+ p.printValue(value, verb, depth+1)
+ }
+ case reflect.Array, reflect.Slice:
+ switch verb {
+ case 's', 'q', 'x', 'X':
+ // Handle byte and uint8 slices and arrays special for the above verbs.
+ t := f.Type()
+ if t.Elem().Kind() == reflect.Uint8 {
+ var bytes []byte
+ if f.Kind() == reflect.Slice {
+ bytes = f.Bytes()
+ } else if f.CanAddr() {
+ bytes = f.Slice(0, f.Len()).Bytes()
+ } else {
+ // We have an array, but we cannot Slice() a non-addressable array,
+ // so we build a slice by hand. This is a rare case but it would be nice
+ // if reflection could help a little more.
+ bytes = make([]byte, f.Len())
+ for i := range bytes {
+ bytes[i] = byte(f.Index(i).Uint())
+ }
+ }
+ p.fmtBytes(bytes, verb, t.String())
+ return
+ }
+ }
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ if f.Kind() == reflect.Slice && f.IsNil() {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.WriteByte('}')
+ } else {
+ p.WriteByte('[')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.WriteByte(' ')
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.WriteByte(']')
+ }
+ case reflect.Ptr:
+ // pointer to array or slice or struct? ok at top level
+ // but not embedded (avoid loops)
+ if depth == 0 && f.Pointer() != 0 {
+ switch a := f.Elem(); a.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map:
+ p.WriteByte('&')
+ p.printValue(a, verb, depth+1)
+ return
+ }
+ }
+ fallthrough
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ p.fmtPointer(f, verb)
+ default:
+ p.unknownType(f)
+ }
+}
+
+func (p *printer) badArgNum(verb rune) {
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(badIndexString)
+}
+
+func (p *printer) missingArg(verb rune) {
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(missingString)
+}
+
+func (p *printer) doPrintf(fmt string) {
+ for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); {
+ switch p.fmt.Status {
+ case format.StatusText:
+ p.WriteString(p.fmt.Text())
+ case format.StatusSubstitution:
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusBadWidthSubstitution:
+ p.WriteString(badWidthString)
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusBadPrecSubstitution:
+ p.WriteString(badPrecString)
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusNoVerb:
+ p.WriteString(noVerbString)
+ case format.StatusBadArgNum:
+ p.badArgNum(p.fmt.Verb)
+ case format.StatusMissingArg:
+ p.missingArg(p.fmt.Verb)
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // Check for extra arguments, but only if there was at least one ordered
+ // argument. Note that this behavior is necessarily different from fmt:
+ // different variants of messages may opt to drop some or all of the
+ // arguments.
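+ // For example, Sprintf("%d", 1, "x") renders as "1%!(EXTRA string=x)"
+ // (illustrative; this mirrors fmt's handling of unused ordered arguments).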
+ if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 {
+ p.fmt.ClearFlags()
+ p.WriteString(extraString)
+ for i, arg := range p.fmt.Args[p.fmt.ArgNum:] {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ if arg == nil {
+ p.WriteString(nilAngleString)
+ } else {
+ p.WriteString(reflect.TypeOf(arg).String())
+ p.WriteString("=")
+ p.printArg(arg, 'v')
+ }
+ }
+ p.WriteByte(')')
+ }
+}
+
+func (p *printer) doPrint(a []interface{}) {
+ prevString := false
+ for argNum, arg := range a {
+ isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
+ // Add a space between two non-string arguments.
+ if argNum > 0 && !isString && !prevString {
+ p.WriteByte(' ')
+ }
+ p.printArg(arg, 'v')
+ prevString = isString
+ }
+}
+
+// doPrintln is like doPrint but always adds a space between arguments
+// and a newline after the last argument.
+func (p *printer) doPrintln(a []interface{}) {
+ for argNum, arg := range a {
+ if argNum > 0 {
+ p.WriteByte(' ')
+ }
+ p.printArg(arg, 'v')
+ }
+ p.WriteByte('\n')
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
new file mode 100644
index 000000000..8b462f3df
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -0,0 +1,119 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/annotations.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*HttpRule)(nil),
+ Field: 72295728,
+ Name: "google.api.http",
+ Tag: "bytes,72295728,opt,name=http",
+ Filename: "google/api/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // See `HttpRule`.
+ //
+ // optional google.api.HttpRule http = 72295728;
+ E_Http = &file_google_api_annotations_proto_extTypes[0]
+)
+
+var File_google_api_annotations_proto protoreflect.FileDescriptor
+
+var file_google_api_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x3a, 0x4b, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb0, 0xca, 0xbc, 0x22,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70,
+ 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x42, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_google_api_annotations_proto_goTypes = []interface{}{
+ (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions
+ (*HttpRule)(nil), // 1: google.api.HttpRule
+}
+var file_google_api_annotations_proto_depIdxs = []int32{
+ 0, // 0: google.api.http:extendee -> google.protobuf.MethodOptions
+ 1, // 1: google.api.http:type_name -> google.api.HttpRule
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_api_annotations_proto_init() }
+func file_google_api_annotations_proto_init() {
+ if File_google_api_annotations_proto != nil {
+ return
+ }
+ file_google_api_http_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_annotations_proto_goTypes,
+ DependencyIndexes: file_google_api_annotations_proto_depIdxs,
+ ExtensionInfos: file_google_api_annotations_proto_extTypes,
+ }.Build()
+ File_google_api_annotations_proto = out.File
+ file_google_api_annotations_proto_rawDesc = nil
+ file_google_api_annotations_proto_goTypes = nil
+ file_google_api_annotations_proto_depIdxs = nil
+}
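
For orientation (this sketch is editorial, not part of the vendored file): downstream tools such as REST-gateway generators typically read the `google.api.http` annotation defined above through the standard protobuf-go extension APIs. A minimal example, assuming a `protoreflect.MethodDescriptor` obtained elsewhere:

```go
package example

import (
	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// httpRuleFor returns the HttpRule attached to a method through the
// google.api.http extension (E_Http above), if one is present.
func httpRuleFor(m protoreflect.MethodDescriptor) (*annotations.HttpRule, bool) {
	opts, ok := m.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, annotations.E_Http) {
		return nil, false
	}
	rule, ok := proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
	return rule, ok && rule != nil
}
```
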
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
new file mode 100644
index 000000000..db7806cb9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -0,0 +1,2103 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/client.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ api "google.golang.org/genproto/googleapis/api"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The organization for which the client libraries are being published.
+ // Affects the URL where generated docs are published, etc.
+type ClientLibraryOrganization int32
+
+const (
+ // Not useful; the unspecified default.
+ ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0
+ // Google Cloud Platform Org.
+ ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1
+ // Ads (Advertising) Org.
+ ClientLibraryOrganization_ADS ClientLibraryOrganization = 2
+ // Photos Org.
+ ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3
+ // Street View Org.
+ ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4
+ // Shopping Org.
+ ClientLibraryOrganization_SHOPPING ClientLibraryOrganization = 5
+ // Geo Org.
+ ClientLibraryOrganization_GEO ClientLibraryOrganization = 6
+ // Generative AI - https://developers.generativeai.google
+ ClientLibraryOrganization_GENERATIVE_AI ClientLibraryOrganization = 7
+)
+
+// Enum value maps for ClientLibraryOrganization.
+var (
+ ClientLibraryOrganization_name = map[int32]string{
+ 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED",
+ 1: "CLOUD",
+ 2: "ADS",
+ 3: "PHOTOS",
+ 4: "STREET_VIEW",
+ 5: "SHOPPING",
+ 6: "GEO",
+ 7: "GENERATIVE_AI",
+ }
+ ClientLibraryOrganization_value = map[string]int32{
+ "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0,
+ "CLOUD": 1,
+ "ADS": 2,
+ "PHOTOS": 3,
+ "STREET_VIEW": 4,
+ "SHOPPING": 5,
+ "GEO": 6,
+ "GENERATIVE_AI": 7,
+ }
+)
+
+func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization {
+ p := new(ClientLibraryOrganization)
+ *p = x
+ return p
+}
+
+func (x ClientLibraryOrganization) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_client_proto_enumTypes[0].Descriptor()
+}
+
+func (ClientLibraryOrganization) Type() protoreflect.EnumType {
+ return &file_google_api_client_proto_enumTypes[0]
+}
+
+func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ClientLibraryOrganization.Descriptor instead.
+func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{0}
+}
+
+// To where should client libraries be published?
+type ClientLibraryDestination int32
+
+const (
+ // Client libraries will neither be generated nor published to package
+ // managers.
+ ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0
+ // Generate the client library in a repo under github.com/googleapis,
+ // but don't publish it to package managers.
+ ClientLibraryDestination_GITHUB ClientLibraryDestination = 10
+ // Publish the library to package managers like nuget.org and npmjs.com.
+ ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20
+)
+
+// Enum value maps for ClientLibraryDestination.
+var (
+ ClientLibraryDestination_name = map[int32]string{
+ 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED",
+ 10: "GITHUB",
+ 20: "PACKAGE_MANAGER",
+ }
+ ClientLibraryDestination_value = map[string]int32{
+ "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0,
+ "GITHUB": 10,
+ "PACKAGE_MANAGER": 20,
+ }
+)
+
+func (x ClientLibraryDestination) Enum() *ClientLibraryDestination {
+ p := new(ClientLibraryDestination)
+ *p = x
+ return p
+}
+
+func (x ClientLibraryDestination) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_client_proto_enumTypes[1].Descriptor()
+}
+
+func (ClientLibraryDestination) Type() protoreflect.EnumType {
+ return &file_google_api_client_proto_enumTypes[1]
+}
+
+func (x ClientLibraryDestination) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ClientLibraryDestination.Descriptor instead.
+func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{1}
+}
+
+// Required information for every language.
+type CommonLanguageSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Link to automatically generated reference documentation. Example:
+ // https://cloud.google.com/nodejs/docs/reference/asset/latest
+ //
+ // Deprecated: Do not use.
+ ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
+ // The destination where API teams want this client library to be published.
+ Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
+ // Configuration for which RPCs should be generated in the GAPIC client.
+ SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
+}
+
+func (x *CommonLanguageSettings) Reset() {
+ *x = CommonLanguageSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CommonLanguageSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CommonLanguageSettings) ProtoMessage() {}
+
+func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead.
+func (*CommonLanguageSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{0}
+}
+
+// Deprecated: Do not use.
+func (x *CommonLanguageSettings) GetReferenceDocsUri() string {
+ if x != nil {
+ return x.ReferenceDocsUri
+ }
+ return ""
+}
+
+func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination {
+ if x != nil {
+ return x.Destinations
+ }
+ return nil
+}
+
+func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
+ if x != nil {
+ return x.SelectiveGapicGeneration
+ }
+ return nil
+}
+
+// Details about how and where to publish client libraries.
+type ClientLibrarySettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Version of the API to apply these settings to. This is the full protobuf
+ // package for the API, ending in the version element.
+ // Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1".
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // Launch stage of this version of the API.
+ LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"`
+ // When using transport=rest, the client request will encode enums as
+ // numbers rather than strings.
+ RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"`
+ // Settings for legacy Java features, supported in the Service YAML.
+ JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"`
+ // Settings for C++ client libraries.
+ CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"`
+ // Settings for PHP client libraries.
+ PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"`
+ // Settings for Python client libraries.
+ PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"`
+ // Settings for Node client libraries.
+ NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"`
+ // Settings for .NET client libraries.
+ DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"`
+ // Settings for Ruby client libraries.
+ RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"`
+ // Settings for Go client libraries.
+ GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"`
+}
+
+func (x *ClientLibrarySettings) Reset() {
+ *x = ClientLibrarySettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientLibrarySettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientLibrarySettings) ProtoMessage() {}
+
+func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead.
+func (*ClientLibrarySettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClientLibrarySettings) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage {
+ if x != nil {
+ return x.LaunchStage
+ }
+ return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED
+}
+
+func (x *ClientLibrarySettings) GetRestNumericEnums() bool {
+ if x != nil {
+ return x.RestNumericEnums
+ }
+ return false
+}
+
+func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings {
+ if x != nil {
+ return x.JavaSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetCppSettings() *CppSettings {
+ if x != nil {
+ return x.CppSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings {
+ if x != nil {
+ return x.PhpSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings {
+ if x != nil {
+ return x.PythonSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings {
+ if x != nil {
+ return x.NodeSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings {
+ if x != nil {
+ return x.DotnetSettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetRubySettings() *RubySettings {
+ if x != nil {
+ return x.RubySettings
+ }
+ return nil
+}
+
+func (x *ClientLibrarySettings) GetGoSettings() *GoSettings {
+ if x != nil {
+ return x.GoSettings
+ }
+ return nil
+}
+
+// This message configures the settings for publishing [Google Cloud Client
+// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries)
+// generated from the service config.
+type Publishing struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of API method settings, e.g. the behavior for methods that use the
+ // long-running operation pattern.
+ MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"`
+ // Link to a *public* URI where users can report issues. Example:
+ // https://issuetracker.google.com/issues/new?component=190865&template=1161103
+ NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"`
+ // Link to product home page. Example:
+ // https://cloud.google.com/asset-inventory/docs/overview
+ DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"`
+ // Used as a tracking tag when collecting data about the API's developer
+ // relations artifacts, like docs, packages delivered to package managers,
+ // etc. Example: "speech".
+ ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"`
+ // GitHub label to apply to issues and pull requests opened for this API.
+ GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"`
+ // GitHub teams to be added to CODEOWNERS in the directory in GitHub
+ // containing source code for the client libraries for this API.
+ CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"`
+ // A prefix used in sample code when demarcating regions to be included in
+ // documentation.
+ DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"`
+ // For whom the client library is being published.
+ Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"`
+ // Client library settings. If the same version string appears multiple
+ // times in this list, then the last one wins. Earlier entries with the
+ // same version string are discarded.
+ LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"`
+ // Optional link to proto reference documentation. Example:
+ // https://cloud.google.com/pubsub/lite/docs/reference/rpc
+ ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"`
+ // Optional link to REST reference documentation. Example:
+ // https://cloud.google.com/pubsub/lite/docs/reference/rest
+ RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"`
+}
+
+func (x *Publishing) Reset() {
+ *x = Publishing{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Publishing) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Publishing) ProtoMessage() {}
+
+func (x *Publishing) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Publishing.ProtoReflect.Descriptor instead.
+func (*Publishing) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Publishing) GetMethodSettings() []*MethodSettings {
+ if x != nil {
+ return x.MethodSettings
+ }
+ return nil
+}
+
+func (x *Publishing) GetNewIssueUri() string {
+ if x != nil {
+ return x.NewIssueUri
+ }
+ return ""
+}
+
+func (x *Publishing) GetDocumentationUri() string {
+ if x != nil {
+ return x.DocumentationUri
+ }
+ return ""
+}
+
+func (x *Publishing) GetApiShortName() string {
+ if x != nil {
+ return x.ApiShortName
+ }
+ return ""
+}
+
+func (x *Publishing) GetGithubLabel() string {
+ if x != nil {
+ return x.GithubLabel
+ }
+ return ""
+}
+
+func (x *Publishing) GetCodeownerGithubTeams() []string {
+ if x != nil {
+ return x.CodeownerGithubTeams
+ }
+ return nil
+}
+
+func (x *Publishing) GetDocTagPrefix() string {
+ if x != nil {
+ return x.DocTagPrefix
+ }
+ return ""
+}
+
+func (x *Publishing) GetOrganization() ClientLibraryOrganization {
+ if x != nil {
+ return x.Organization
+ }
+ return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED
+}
+
+func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings {
+ if x != nil {
+ return x.LibrarySettings
+ }
+ return nil
+}
+
+func (x *Publishing) GetProtoReferenceDocumentationUri() string {
+ if x != nil {
+ return x.ProtoReferenceDocumentationUri
+ }
+ return ""
+}
+
+func (x *Publishing) GetRestReferenceDocumentationUri() string {
+ if x != nil {
+ return x.RestReferenceDocumentationUri
+ }
+ return ""
+}
+
+// Settings for Java client libraries.
+type JavaSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The package name to use in Java. Clobbers the java_package option
+ // set in the protobuf. This should be used **only** by APIs
+ // that have already set the `language_settings.java.package_name` field
+ // in gapic.yaml. API teams should use the protobuf java_package option
+ // where possible.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ //   java_settings:
+ //     library_package: com.google.cloud.pubsub.v1
+ LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"`
+ // Configure the Java class name to use instead of the service's for its
+ // corresponding generated GAPIC client. Keys are fully-qualified
+ // service names as they appear in the protobuf (including the full
+ // package name), as in the YAML example below. This should be used
+ // **only** by APIs that have already set the
+ // `language_settings.java.interface_names` field in gapic.yaml. API
+ // teams should otherwise use the service name as it appears in the
+ // protobuf.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ //   java_settings:
+ //     service_class_names:
+ //       - google.pubsub.v1.Publisher: TopicAdmin
+ //       - google.pubsub.v1.Subscriber: SubscriptionAdmin
+ ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"`
+}
+
+func (x *JavaSettings) Reset() {
+ *x = JavaSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *JavaSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JavaSettings) ProtoMessage() {}
+
+func (x *JavaSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead.
+func (*JavaSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *JavaSettings) GetLibraryPackage() string {
+ if x != nil {
+ return x.LibraryPackage
+ }
+ return ""
+}
+
+func (x *JavaSettings) GetServiceClassNames() map[string]string {
+ if x != nil {
+ return x.ServiceClassNames
+ }
+ return nil
+}
+
+func (x *JavaSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+// Settings for C++ client libraries.
+type CppSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+}
+
+func (x *CppSettings) Reset() {
+ *x = CppSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CppSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CppSettings) ProtoMessage() {}
+
+func (x *CppSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead.
+func (*CppSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *CppSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+// Settings for Php client libraries.
+type PhpSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+}
+
+func (x *PhpSettings) Reset() {
+ *x = PhpSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PhpSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PhpSettings) ProtoMessage() {}
+
+func (x *PhpSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead.
+func (*PhpSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *PhpSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+// Settings for Python client libraries.
+type PythonSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Experimental features to be included during client library generation.
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
+}
+
+func (x *PythonSettings) Reset() {
+ *x = PythonSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings) ProtoMessage() {}
+
+func (x *PythonSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead.
+func (*PythonSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *PythonSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+ if x != nil {
+ return x.ExperimentalFeatures
+ }
+ return nil
+}
+
+// Settings for Node client libraries.
+type NodeSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+}
+
+func (x *NodeSettings) Reset() {
+ *x = NodeSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NodeSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NodeSettings) ProtoMessage() {}
+
+func (x *NodeSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead.
+func (*NodeSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *NodeSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+// Settings for Dotnet client libraries.
+type DotnetSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Map from original service names to renamed versions.
+ // This is used when the default generated types
+ // would cause a naming conflict. (Neither name is
+ // fully-qualified.)
+ // Example: Subscriber to SubscriberServiceApi.
+ RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Map from full resource types to the effective short name
+ // for the resource. This is used when resource names from
+ // different services would otherwise cause naming collisions.
+ // Example entry:
+ // "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
+ RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // List of full resource types to ignore during generation.
+ // This is typically used for API-specific Location resources,
+ // which should be handled by the generator as if they were actually
+ // the common Location resources.
+ // Example entry: "documentai.googleapis.com/Location"
+ IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"`
+ // Namespaces which must be aliased in snippets due to
+ // a known (but non-generator-predictable) naming collision.
+ ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"`
+ // Method signatures (in the form "service.method(signature)")
+ // which are provided separately, so they shouldn't be generated.
+ // Snippets *calling* these methods are still generated, however.
+ HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"`
+}
+
+func (x *DotnetSettings) Reset() {
+ *x = DotnetSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DotnetSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DotnetSettings) ProtoMessage() {}
+
+func (x *DotnetSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead.
+func (*DotnetSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *DotnetSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+func (x *DotnetSettings) GetRenamedServices() map[string]string {
+ if x != nil {
+ return x.RenamedServices
+ }
+ return nil
+}
+
+func (x *DotnetSettings) GetRenamedResources() map[string]string {
+ if x != nil {
+ return x.RenamedResources
+ }
+ return nil
+}
+
+func (x *DotnetSettings) GetIgnoredResources() []string {
+ if x != nil {
+ return x.IgnoredResources
+ }
+ return nil
+}
+
+func (x *DotnetSettings) GetForcedNamespaceAliases() []string {
+ if x != nil {
+ return x.ForcedNamespaceAliases
+ }
+ return nil
+}
+
+func (x *DotnetSettings) GetHandwrittenSignatures() []string {
+ if x != nil {
+ return x.HandwrittenSignatures
+ }
+ return nil
+}
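
As a usage sketch (editorial, with the entries taken verbatim from the field comments above rather than from any real service config), the map- and list-typed fields could be populated like so:

```go
package example

import "google.golang.org/genproto/googleapis/api/annotations"

// exampleDotnetSettings shows the shape of the renaming and ignore lists,
// reusing the sample entries from the field comments above.
func exampleDotnetSettings() *annotations.DotnetSettings {
	return &annotations.DotnetSettings{
		RenamedServices: map[string]string{
			"Subscriber": "SubscriberServiceApi", // sidestep a generated-type name clash
		},
		RenamedResources: map[string]string{
			"datalabeling.googleapis.com/Dataset": "DataLabelingDataset",
		},
		IgnoredResources: []string{
			"documentai.googleapis.com/Location", // handled as the common Location resource
		},
	}
}
```
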
+
+// Settings for Ruby client libraries.
+type RubySettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+}
+
+func (x *RubySettings) Reset() {
+ *x = RubySettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RubySettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RubySettings) ProtoMessage() {}
+
+func (x *RubySettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead.
+func (*RubySettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *RubySettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+// Settings for Go client libraries.
+type GoSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Some settings.
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Map of service names to renamed services. Keys are the package-relative
+ // service names and values are the name to be used for the service client
+ // and call options.
+ //
+ // publishing:
+ //   go_settings:
+ //     renamed_services:
+ //       Publisher: TopicAdmin
+ RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GoSettings) Reset() {
+ *x = GoSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GoSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GoSettings) ProtoMessage() {}
+
+func (x *GoSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead.
+func (*GoSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GoSettings) GetCommon() *CommonLanguageSettings {
+ if x != nil {
+ return x.Common
+ }
+ return nil
+}
+
+func (x *GoSettings) GetRenamedServices() map[string]string {
+ if x != nil {
+ return x.RenamedServices
+ }
+ return nil
+}
+
+// Describes the generator configuration for a method.
+type MethodSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully qualified name of the method to which the options below apply.
+ // This is used to find the method to apply the options.
+ //
+ // Example:
+ //
+ // publishing:
+ //   method_settings:
+ //     - selector: google.storage.control.v2.StorageControl.CreateFolder
+ //       # method settings for CreateFolder...
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ // Describes settings to use for long-running operations when generating
+ // API methods for RPCs. Complements RPCs that use the annotations in
+ // google/longrunning/operations.proto.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ //   method_settings:
+ //     - selector: google.cloud.speech.v2.Speech.BatchRecognize
+ //       long_running:
+ //         initial_poll_delay: 60s # 1 minute
+ //         poll_delay_multiplier: 1.5
+ //         max_poll_delay: 360s # 6 minutes
+ //         total_poll_timeout: 54000s # 90 minutes
+ LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
+ // List of top-level fields of the request message that should be
+ // automatically populated by the client libraries based on their
+ // (google.api.field_info).format. Currently supported format: UUID4.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ //   method_settings:
+ //     - selector: google.example.v1.ExampleService.CreateExample
+ //       auto_populated_fields:
+ //         - request_id
+ AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
+}
+
+func (x *MethodSettings) Reset() {
+ *x = MethodSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MethodSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MethodSettings) ProtoMessage() {}
+
+func (x *MethodSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead.
+func (*MethodSettings) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *MethodSettings) GetSelector() string {
+ if x != nil {
+ return x.Selector
+ }
+ return ""
+}
+
+func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning {
+ if x != nil {
+ return x.LongRunning
+ }
+ return nil
+}
+
+func (x *MethodSettings) GetAutoPopulatedFields() []string {
+ if x != nil {
+ return x.AutoPopulatedFields
+ }
+ return nil
+}
+
+// This message is used to configure the generation of a subset of the RPCs in
+// a service for client libraries.
+type SelectiveGapicGeneration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An allowlist of the fully qualified names of RPCs that should be included
+ // on public client surfaces.
+ Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
+ // Setting this to true indicates to the client generators that methods
+ // that would be excluded from generation should instead be generated
+ // in a way that marks them as not intended for end users. How this is
+ // expressed is up to individual language implementations to decide. Some
+ // examples may be: added annotations, obfuscated identifiers, or other
+ // language-idiomatic patterns.
+ GenerateOmittedAsInternal bool `protobuf:"varint,2,opt,name=generate_omitted_as_internal,json=generateOmittedAsInternal,proto3" json:"generate_omitted_as_internal,omitempty"`
+}
+
+func (x *SelectiveGapicGeneration) Reset() {
+ *x = SelectiveGapicGeneration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SelectiveGapicGeneration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelectiveGapicGeneration) ProtoMessage() {}
+
+func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
+func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SelectiveGapicGeneration) GetMethods() []string {
+ if x != nil {
+ return x.Methods
+ }
+ return nil
+}
+
+func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool {
+ if x != nil {
+ return x.GenerateOmittedAsInternal
+ }
+ return false
+}
+
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables generation of asynchronous REST clients if `rest` transport is
+ // enabled. By default, asynchronous REST clients will not be generated.
+ // This feature will be enabled by default 1 month after launching the
+ // feature in preview packages.
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+ // Enables generation of protobuf code using new, more Pythonic types,
+ // which are included in `protobuf>=5.29.x`. This feature will be
+ // enabled by default 1 month after launching the feature in preview
+ // packages.
+ ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
+ // Disables generation of an unversioned Python package for this client
+ // library. This means that the module names will need to be versioned in
+ // import statements. For example `import google.cloud.library_v2` instead
+ // of `import google.cloud.library`.
+ UnversionedPackageDisabled bool `protobuf:"varint,3,opt,name=unversioned_package_disabled,json=unversionedPackageDisabled,proto3" json:"unversioned_package_disabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+ *x = PythonSettings_ExperimentalFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+ if x != nil {
+ return x.RestAsyncIoEnabled
+ }
+ return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
+ if x != nil {
+ return x.ProtobufPythonicTypesEnabled
+ }
+ return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetUnversionedPackageDisabled() bool {
+ if x != nil {
+ return x.UnversionedPackageDisabled
+ }
+ return false
+}
+
+// Describes settings to use when generating API methods that use the
+// long-running operation pattern.
+// All default values below are from those used in the client library
+// generators (e.g.
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
+type MethodSettings_LongRunning struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Initial delay after which the first poll request will be made.
+ // Default value: 5 seconds.
+ InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"`
+ // Multiplier to gradually increase delay between subsequent polls until it
+ // reaches max_poll_delay.
+ // Default value: 1.5.
+ PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"`
+ // Maximum time between two subsequent poll requests.
+ // Default value: 45 seconds.
+ MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"`
+ // Total polling timeout.
+ // Default value: 5 minutes.
+ TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"`
+}
+
+func (x *MethodSettings_LongRunning) Reset() {
+ *x = MethodSettings_LongRunning{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MethodSettings_LongRunning) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MethodSettings_LongRunning) ProtoMessage() {}
+
+func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead.
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{11, 0}
+}
+
+func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration {
+ if x != nil {
+ return x.InitialPollDelay
+ }
+ return nil
+}
+
+func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 {
+ if x != nil {
+ return x.PollDelayMultiplier
+ }
+ return 0
+}
+
+func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration {
+ if x != nil {
+ return x.MaxPollDelay
+ }
+ return nil
+}
+
+func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.TotalPollTimeout
+ }
+ return nil
+}
+
+var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 1051,
+ Name: "google.api.method_signature",
+ Tag: "bytes,1051,rep,name=method_signature",
+ Filename: "google/api/client.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 1049,
+ Name: "google.api.default_host",
+ Tag: "bytes,1049,opt,name=default_host",
+ Filename: "google/api/client.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 1050,
+ Name: "google.api.oauth_scopes",
+ Tag: "bytes,1050,opt,name=oauth_scopes",
+ Filename: "google/api/client.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 525000001,
+ Name: "google.api.api_version",
+ Tag: "bytes,525000001,opt,name=api_version",
+ Filename: "google/api/client.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // A definition of a client library method signature.
+ //
+ // In client libraries, each proto RPC corresponds to one or more methods
+ // which the end user is able to call and which invoke the underlying RPC.
+ // Normally, this method receives a single argument (a struct or instance
+ // corresponding to the RPC request object). Defining this field will
+ // add one or more overloads providing flattened or simpler method signatures
+ // in some languages.
+ //
+ // The fields on the method signature are provided as a comma-separated
+ // string.
+ //
+ // For example, the proto RPC and annotation:
+ //
+ // rpc CreateSubscription(CreateSubscriptionRequest)
+ // returns (Subscription) {
+ // option (google.api.method_signature) = "name,topic";
+ // }
+ //
+ // Would add the following Java overload (in addition to the method accepting
+ // the request object):
+ //
+ // public final Subscription createSubscription(String name, String topic)
+ //
+ // The following backwards-compatibility guidelines apply:
+ //
+ // - Adding this annotation to an unannotated method is backwards
+ // compatible.
+ // - Adding this annotation to a method which already has existing
+ // method signature annotations is backwards compatible if and only if
+ // the new method signature annotation is last in the sequence.
+ // - Modifying or removing an existing method signature annotation is
+ // a breaking change.
+ // - Re-ordering existing method signature annotations is a breaking
+ // change.
+ //
+ // repeated string method_signature = 1051;
+ E_MethodSignature = &file_google_api_client_proto_extTypes[0]
+)
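+
+ // Illustrative sketch (an assumption, not part of the generated API): a
+ // generator or tool could read the signatures declared above from a
+ // hypothetical method descriptor methodDesc using
+ // google.golang.org/protobuf/proto:
+ //
+ //   opts := methodDesc.Options().(*descriptorpb.MethodOptions)
+ //   sigs := proto.GetExtension(opts, E_MethodSignature).([]string)
+ //   // Each entry is a comma-separated field list, e.g. "name,topic".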
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // The hostname for this service.
+ // This should be specified with no prefix or protocol.
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.default_host) = "foo.googleapi.com";
+ // ...
+ // }
+ //
+ // optional string default_host = 1049;
+ E_DefaultHost = &file_google_api_client_proto_extTypes[1]
+ // OAuth scopes needed for the client.
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.oauth_scopes) = \
+ // "https://www.googleapis.com/auth/cloud-platform";
+ // ...
+ // }
+ //
+ // If there is more than one scope, use a comma-separated string:
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.oauth_scopes) = \
+ // "https://www.googleapis.com/auth/cloud-platform,"
+ // "https://www.googleapis.com/auth/monitoring";
+ // ...
+ // }
+ //
+ // optional string oauth_scopes = 1050;
+ E_OauthScopes = &file_google_api_client_proto_extTypes[2]
+ // The API version of this service, which should be sent by version-aware
+ // clients to the service. This allows services to abide by the schema and
+ // behavior of the service at the time this API version was deployed.
+ // The format of the API version must be treated as opaque by clients.
+ // Services may use a format with an apparent structure, but clients must
+ // not rely on this to determine components within an API version, or attempt
+ // to construct other valid API versions. Note that this is for upcoming
+ // functionality and may not be implemented for all services.
+ //
+ // Example:
+ //
+ // service Foo {
+ // option (google.api.api_version) = "v1_20230821_preview";
+ // }
+ //
+ // optional string api_version = 525000001;
+ E_ApiVersion = &file_google_api_client_proto_extTypes[3]
+)
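+
+ // Illustrative sketch (assumption): given a hypothetical service descriptor
+ // svcDesc, a version-aware client could resolve the endpoint and scopes via
+ // google.golang.org/protobuf/proto (strings is from the standard library):
+ //
+ //   opts := svcDesc.Options().(*descriptorpb.ServiceOptions)
+ //   host := proto.GetExtension(opts, E_DefaultHost).(string)
+ //   scopes := strings.Split(proto.GetExtension(opts, E_OauthScopes).(string), ",")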
+
+var File_google_api_client_proto protoreflect.FileDescriptor
+
+var file_google_api_client_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
+ 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
+ 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73,
+ 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
+ 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
+ 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
+ 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+ 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+ 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
+ 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
+ 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
+ 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
+ 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
+ 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
+ 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
+ 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
+ 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
+ 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
+ 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
+ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
+ 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
+ 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
+ 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
+ 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
+ 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
+ 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
+ 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
+ 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+ 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
+ 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
+ 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
+ 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
+ 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+ 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
+ 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+ 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
+ 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
+ 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
+ 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+ 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
+ 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
+ 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
+ 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
+ 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
+ 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
+ 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12,
+ 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a,
+ 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f,
+ 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e,
+ 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f,
+ 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61,
+ 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f,
+ 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
+ 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79,
+ 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f,
+ 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70,
+ 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d,
+ 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
+ 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a,
+ 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53,
+ 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45,
+ 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10,
+ 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45,
+ 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a,
+ 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49,
+ 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54,
+ 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10,
+ 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e,
+ 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f,
+ 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68,
+ 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b,
+ 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab,
+ 0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_client_proto_rawDescOnce sync.Once
+ file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc
+)
+
+func file_google_api_client_proto_rawDescGZIP() []byte {
+ file_google_api_client_proto_rawDescOnce.Do(func() {
+ file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData)
+ })
+ return file_google_api_client_proto_rawDescData
+}
+
+var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_google_api_client_proto_goTypes = []interface{}{
+ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
+ (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
+ (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
+ (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
+ (*Publishing)(nil), // 4: google.api.Publishing
+ (*JavaSettings)(nil), // 5: google.api.JavaSettings
+ (*CppSettings)(nil), // 6: google.api.CppSettings
+ (*PhpSettings)(nil), // 7: google.api.PhpSettings
+ (*PythonSettings)(nil), // 8: google.api.PythonSettings
+ (*NodeSettings)(nil), // 9: google.api.NodeSettings
+ (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
+ (*RubySettings)(nil), // 11: google.api.RubySettings
+ (*GoSettings)(nil), // 12: google.api.GoSettings
+ (*MethodSettings)(nil), // 13: google.api.MethodSettings
+ (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration
+ nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry
+ (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
+ nil, // 17: google.api.DotnetSettings.RenamedServicesEntry
+ nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry
+ nil, // 19: google.api.GoSettings.RenamedServicesEntry
+ (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning
+ (api.LaunchStage)(0), // 21: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 22: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
+}
+var file_google_api_client_proto_depIdxs = []int32{
+ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
+ 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
+ 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+ 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
+ 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
+ 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
+ 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
+ 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
+ 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
+ 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
+ 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
+ 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
+ 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
+ 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
+ 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
+ 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
+ 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+ 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+ 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+ 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+ 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+ 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
+ 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+ 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+ 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+ 35, // [35:35] is the sub-list for method output_type
+ 35, // [35:35] is the sub-list for method input_type
+ 35, // [35:35] is the sub-list for extension type_name
+ 31, // [31:35] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_google_api_client_proto_init() }
+func file_google_api_client_proto_init() {
+ if File_google_api_client_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CommonLanguageSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClientLibrarySettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Publishing); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*JavaSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CppSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PhpSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NodeSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DotnetSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RubySettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GoSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MethodSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SelectiveGapicGeneration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MethodSettings_LongRunning); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_client_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 19,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_client_proto_goTypes,
+ DependencyIndexes: file_google_api_client_proto_depIdxs,
+ EnumInfos: file_google_api_client_proto_enumTypes,
+ MessageInfos: file_google_api_client_proto_msgTypes,
+ ExtensionInfos: file_google_api_client_proto_extTypes,
+ }.Build()
+ File_google_api_client_proto = out.File
+ file_google_api_client_proto_rawDesc = nil
+ file_google_api_client_proto_goTypes = nil
+ file_google_api_client_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
new file mode 100644
index 000000000..08505ba3f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -0,0 +1,266 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/field_behavior.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// An indicator of the behavior of a given field (for example, that a field
+// is required in requests, or given as output but ignored as input).
+// This **does not** change the behavior in protocol buffers itself; it only
+// denotes the behavior and may affect how API tooling handles the field.
+//
+// Note: This enum **may** receive new values in the future.
+type FieldBehavior int32
+
+const (
+ // Conventional default for enums. Do not use this.
+ FieldBehavior_FIELD_BEHAVIOR_UNSPECIFIED FieldBehavior = 0
+ // Specifically denotes a field as optional.
+ // While all fields in protocol buffers are optional, this may be specified
+ // for emphasis if appropriate.
+ FieldBehavior_OPTIONAL FieldBehavior = 1
+ // Denotes a field as required.
+ // This indicates that the field **must** be provided as part of the request,
+ // and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
+ FieldBehavior_REQUIRED FieldBehavior = 2
+ // Denotes a field as output only.
+ // This indicates that the field is provided in responses, but including the
+ // field in a request does nothing (the server *must* ignore it and
+ // *must not* throw an error as a result of the field's presence).
+ FieldBehavior_OUTPUT_ONLY FieldBehavior = 3
+ // Denotes a field as input only.
+ // This indicates that the field is provided in requests, and the
+ // corresponding field is not included in output.
+ FieldBehavior_INPUT_ONLY FieldBehavior = 4
+ // Denotes a field as immutable.
+ // This indicates that the field may be set once in a request to create a
+ // resource, but may not be changed thereafter.
+ FieldBehavior_IMMUTABLE FieldBehavior = 5
+ // Denotes that a (repeated) field is an unordered list.
+ // This indicates that the service may provide the elements of the list
+ // in any arbitrary order, rather than the order the user originally
+ // provided. Additionally, the list's order may or may not be stable.
+ FieldBehavior_UNORDERED_LIST FieldBehavior = 6
+ // Denotes that this field returns a non-empty default value if not set.
+ // This indicates that if the user provides the empty value in a request,
+ // a non-empty value will be returned. The user will not be aware of what
+ // non-empty value to expect.
+ FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
+ // Denotes that the field in a resource (a message annotated with
+ // google.api.resource) is used in the resource name to uniquely identify the
+ // resource. For AIP-compliant APIs, this should only be applied to the
+ // `name` field on the resource.
+ //
+ // This behavior should not be applied to references to other resources within
+ // the message.
+ //
+ // The identifier field of resources often has different field behavior
+ // depending on the request it is embedded in (e.g. for Create methods name
+ // is optional and unused, while for Update methods it is required). Instead
+ // of method-specific annotations, only `IDENTIFIER` is required.
+ FieldBehavior_IDENTIFIER FieldBehavior = 8
+)
+
+// Enum value maps for FieldBehavior.
+var (
+ FieldBehavior_name = map[int32]string{
+ 0: "FIELD_BEHAVIOR_UNSPECIFIED",
+ 1: "OPTIONAL",
+ 2: "REQUIRED",
+ 3: "OUTPUT_ONLY",
+ 4: "INPUT_ONLY",
+ 5: "IMMUTABLE",
+ 6: "UNORDERED_LIST",
+ 7: "NON_EMPTY_DEFAULT",
+ 8: "IDENTIFIER",
+ }
+ FieldBehavior_value = map[string]int32{
+ "FIELD_BEHAVIOR_UNSPECIFIED": 0,
+ "OPTIONAL": 1,
+ "REQUIRED": 2,
+ "OUTPUT_ONLY": 3,
+ "INPUT_ONLY": 4,
+ "IMMUTABLE": 5,
+ "UNORDERED_LIST": 6,
+ "NON_EMPTY_DEFAULT": 7,
+ "IDENTIFIER": 8,
+ }
+)
+
+func (x FieldBehavior) Enum() *FieldBehavior {
+ p := new(FieldBehavior)
+ *p = x
+ return p
+}
+
+func (x FieldBehavior) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_field_behavior_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldBehavior) Type() protoreflect.EnumType {
+ return &file_google_api_field_behavior_proto_enumTypes[0]
+}
+
+func (x FieldBehavior) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FieldBehavior.Descriptor instead.
+func (FieldBehavior) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0}
+}
+
+var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: ([]FieldBehavior)(nil),
+ Field: 1052,
+ Name: "google.api.field_behavior",
+ Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior",
+ Filename: "google/api/field_behavior.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // A designation of a specific field behavior (required, output only, etc.)
+ // in protobuf messages.
+ //
+ // Examples:
+ //
+ // string name = 1 [(google.api.field_behavior) = REQUIRED];
+ // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+ // google.protobuf.Duration ttl = 1
+ // [(google.api.field_behavior) = INPUT_ONLY];
+ // google.protobuf.Timestamp expire_time = 1
+ // [(google.api.field_behavior) = OUTPUT_ONLY,
+ // (google.api.field_behavior) = IMMUTABLE];
+ //
+ // repeated google.api.FieldBehavior field_behavior = 1052;
+ E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0]
+)
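+
+ // Illustrative sketch (assumption): validation tooling could inspect a
+ // hypothetical field descriptor fieldDesc for the REQUIRED marker:
+ //
+ //   opts := fieldDesc.Options().(*descriptorpb.FieldOptions)
+ //   behaviors := proto.GetExtension(opts, E_FieldBehavior).([]FieldBehavior)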
+
+var File_google_api_field_behavior_proto protoreflect.FileDescriptor
+
+var file_google_api_field_behavior_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
+ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+ 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
+ 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+ 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d,
+ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
+ 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
+ 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
+ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52,
+ 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70,
+ 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_field_behavior_proto_rawDescOnce sync.Once
+ file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc
+)
+
+func file_google_api_field_behavior_proto_rawDescGZIP() []byte {
+ file_google_api_field_behavior_proto_rawDescOnce.Do(func() {
+ file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData)
+ })
+ return file_google_api_field_behavior_proto_rawDescData
+}
+
+var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_field_behavior_proto_goTypes = []interface{}{
+ (FieldBehavior)(0), // 0: google.api.FieldBehavior
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_google_api_field_behavior_proto_depIdxs = []int32{
+ 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions
+ 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_api_field_behavior_proto_init() }
+func file_google_api_field_behavior_proto_init() {
+ if File_google_api_field_behavior_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_field_behavior_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_field_behavior_proto_goTypes,
+ DependencyIndexes: file_google_api_field_behavior_proto_depIdxs,
+ EnumInfos: file_google_api_field_behavior_proto_enumTypes,
+ ExtensionInfos: file_google_api_field_behavior_proto_extTypes,
+ }.Build()
+ File_google_api_field_behavior_proto = out.File
+ file_google_api_field_behavior_proto_rawDesc = nil
+ file_google_api_field_behavior_proto_goTypes = nil
+ file_google_api_field_behavior_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
new file mode 100644
index 000000000..a462e7d01
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
@@ -0,0 +1,392 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/field_info.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The standard format of a field value. The supported formats are all backed
+// by either an RFC defined by the IETF or a Google-defined AIP.
+type FieldInfo_Format int32
+
+const (
+ // Default, unspecified value.
+ FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0
+ // Universally Unique Identifier, version 4, value as defined by
+ // https://datatracker.ietf.org/doc/html/rfc4122. The value may be
+ // normalized to entirely lowercase letters. For example, the value
+ // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to
+ // `f47ac10b-58cc-0372-8567-0e02b2c3d479`.
+ FieldInfo_UUID4 FieldInfo_Format = 1
+ // Internet Protocol v4 value as defined by [RFC
+ // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be
+ // condensed, with leading zeros in each octet stripped. For example,
+ // `001.022.233.040` would be condensed to `1.22.233.40`.
+ FieldInfo_IPV4 FieldInfo_Format = 2
+ // Internet Protocol v6 value as defined by [RFC
+ // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be
+ // normalized to entirely lowercase letters with zeros compressed, following
+ // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example,
+ // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`.
+ FieldInfo_IPV6 FieldInfo_Format = 3
+ // An IP address in either v4 or v6 format as described by the individual
+ // values defined herein. See the comments on the IPV4 and IPV6 types for
+ // allowed normalizations of each.
+ FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4
+)
+
+// Enum value maps for FieldInfo_Format.
+var (
+ FieldInfo_Format_name = map[int32]string{
+ 0: "FORMAT_UNSPECIFIED",
+ 1: "UUID4",
+ 2: "IPV4",
+ 3: "IPV6",
+ 4: "IPV4_OR_IPV6",
+ }
+ FieldInfo_Format_value = map[string]int32{
+ "FORMAT_UNSPECIFIED": 0,
+ "UUID4": 1,
+ "IPV4": 2,
+ "IPV6": 3,
+ "IPV4_OR_IPV6": 4,
+ }
+)
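+
+// Illustrative sketch (not generated code): the name and value maps above
+// allow symmetric conversion between the enum and its proto identifier,
+// e.g. when round-tripping a textual configuration value:
+//
+//	name := FieldInfo_Format_name[int32(FieldInfo_IPV4)] // "IPV4"
+//	format := FieldInfo_Format(FieldInfo_Format_value[name])
+//	// format == FieldInfo_IPV4, format.String() == "IPV4"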
+
+func (x FieldInfo_Format) Enum() *FieldInfo_Format {
+ p := new(FieldInfo_Format)
+ *p = x
+ return p
+}
+
+func (x FieldInfo_Format) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_field_info_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldInfo_Format) Type() protoreflect.EnumType {
+ return &file_google_api_field_info_proto_enumTypes[0]
+}
+
+func (x FieldInfo_Format) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FieldInfo_Format.Descriptor instead.
+func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Rich semantic information of an API field beyond basic typing.
+type FieldInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The standard format of a field value. This does not explicitly configure
+ // any API consumer, just documents the API's format for the field it is
+ // applied to.
+ Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"`
+ // The type(s) that the annotated, generic field may represent.
+ //
+ // Currently, this must only be used on fields of type `google.protobuf.Any`.
+ // Supporting other generic types may be considered in the future.
+ ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"`
+}
+
+func (x *FieldInfo) Reset() {
+ *x = FieldInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_field_info_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldInfo) ProtoMessage() {}
+
+func (x *FieldInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_field_info_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead.
+func (*FieldInfo) Descriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldInfo) GetFormat() FieldInfo_Format {
+ if x != nil {
+ return x.Format
+ }
+ return FieldInfo_FORMAT_UNSPECIFIED
+}
+
+func (x *FieldInfo) GetReferencedTypes() []*TypeReference {
+ if x != nil {
+ return x.ReferencedTypes
+ }
+ return nil
+}
+
+// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo].
+type TypeReference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the type that the annotated, generic field may represent.
+ // If the type is in the same protobuf package, the value can be the simple
+	// message name, e.g. `"MyMessage"`. Otherwise, the value must be the
+	// fully-qualified message name, e.g. `"google.library.v1.Book"`.
+ //
+ // If the type(s) are unknown to the service (e.g. the field accepts generic
+ // user input), use the wildcard `"*"` to denote this behavior.
+ //
+ // See [AIP-202](https://google.aip.dev/202#type-references) for more details.
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
+}
+
+func (x *TypeReference) Reset() {
+ *x = TypeReference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_field_info_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypeReference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypeReference) ProtoMessage() {}
+
+func (x *TypeReference) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_field_info_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead.
+func (*TypeReference) Descriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TypeReference) GetTypeName() string {
+ if x != nil {
+ return x.TypeName
+ }
+ return ""
+}
+
+var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldInfo)(nil),
+ Field: 291403980,
+ Name: "google.api.field_info",
+ Tag: "bytes,291403980,opt,name=field_info",
+ Filename: "google/api/field_info.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // Rich semantic descriptor of an API field beyond the basic typing.
+ //
+ // Examples:
+ //
+ // string request_id = 1 [(google.api.field_info).format = UUID4];
+ // string old_ip_address = 2 [(google.api.field_info).format = IPV4];
+ // string new_ip_address = 3 [(google.api.field_info).format = IPV6];
+ // string actual_ip_address = 4 [
+ // (google.api.field_info).format = IPV4_OR_IPV6
+ // ];
+ // google.protobuf.Any generic_field = 5 [
+ // (google.api.field_info).referenced_types = {type_name: "ActualType"},
+ // (google.api.field_info).referenced_types = {type_name: "OtherType"},
+ // ];
+ // google.protobuf.Any generic_user_input = 5 [
+ // (google.api.field_info).referenced_types = {type_name: "*"},
+ // ];
+ //
+ // optional google.api.FieldInfo field_info = 291403980;
+ E_FieldInfo = &file_google_api_field_info_proto_extTypes[0]
+)
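+
+// Illustrative sketch (an assumption, not generated code): the extension can
+// be read back at runtime from a field's options via proto reflection, where
+// fd is an assumed protoreflect.FieldDescriptor and proto is the
+// google.golang.org/protobuf/proto package:
+//
+//	opts := fd.Options().(*descriptorpb.FieldOptions)
+//	if info, ok := proto.GetExtension(opts, E_FieldInfo).(*FieldInfo); ok {
+//		if info.GetFormat() == FieldInfo_UUID4 {
+//			// validate the field's string value as a UUID here
+//		}
+//	}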
+
+var File_google_api_field_info_proto protoreflect.FileDescriptor
+
+var file_google_api_field_info_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64,
+ 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34,
+ 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04,
+ 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f,
+ 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
+ 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42,
+ 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_field_info_proto_rawDescOnce sync.Once
+ file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc
+)
+
+func file_google_api_field_info_proto_rawDescGZIP() []byte {
+ file_google_api_field_info_proto_rawDescOnce.Do(func() {
+ file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData)
+ })
+ return file_google_api_field_info_proto_rawDescData
+}
+
+var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_field_info_proto_goTypes = []interface{}{
+ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format
+ (*FieldInfo)(nil), // 1: google.api.FieldInfo
+ (*TypeReference)(nil), // 2: google.api.TypeReference
+ (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
+}
+var file_google_api_field_info_proto_depIdxs = []int32{
+ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format
+ 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference
+ 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions
+ 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 3, // [3:4] is the sub-list for extension type_name
+ 2, // [2:3] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_field_info_proto_init() }
+func file_google_api_field_info_proto_init() {
+ if File_google_api_field_info_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypeReference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_field_info_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_field_info_proto_goTypes,
+ DependencyIndexes: file_google_api_field_info_proto_depIdxs,
+ EnumInfos: file_google_api_field_info_proto_enumTypes,
+ MessageInfos: file_google_api_field_info_proto_msgTypes,
+ ExtensionInfos: file_google_api_field_info_proto_extTypes,
+ }.Build()
+ File_google_api_field_info_proto = out.File
+ file_google_api_field_info_proto_rawDesc = nil
+ file_google_api_field_info_proto_goTypes = nil
+ file_google_api_field_info_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
new file mode 100644
index 000000000..c93b4f524
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -0,0 +1,774 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/http.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+type Http struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
+ // When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
+}
+
+func (x *Http) Reset() {
+ *x = Http{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http) ProtoMessage() {}
+
+func (x *Http) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http.ProtoReflect.Descriptor instead.
+func (*Http) Descriptor() ([]byte, []int) {
+ return file_google_api_http_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Http) GetRules() []*HttpRule {
+ if x != nil {
+ return x.Rules
+ }
+ return nil
+}
+
+func (x *Http) GetFullyDecodeReservedExpansion() bool {
+ if x != nil {
+ return x.FullyDecodeReservedExpansion
+ }
+ return false
+}
+
+// # gRPC Transcoding
+//
+// gRPC Transcoding is a feature for mapping between a gRPC method and one or
+// more HTTP REST endpoints. It allows developers to build a single API service
+// that supports both gRPC APIs and REST APIs. Many systems, including [Google
+// APIs](https://github.com/googleapis/googleapis),
+// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
+// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
+// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature
+// and use it for large scale production services.
+//
+// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
+// how different portions of the gRPC request message are mapped to the URL
+// path, URL query parameters, and HTTP request body. It also controls how the
+// gRPC response message is mapped to the HTTP response body. `HttpRule` is
+// typically specified as an `google.api.http` annotation on the gRPC method.
+//
+// Each mapping specifies a URL path template and an HTTP method. The path
+// template may refer to one or more fields in the gRPC request message, as long
+// as each field is a non-repeated field with a primitive (non-message) type.
+// The path template controls how fields of the request message are mapped to
+// the URL path.
+//
+// Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/{name=messages/*}"
+// };
+// }
+// }
+// message GetMessageRequest {
+// string name = 1; // Mapped to URL path.
+// }
+// message Message {
+// string text = 1; // The resource content.
+// }
+//
+// This enables an HTTP REST to gRPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(name: "messages/123456")`
+//
+// Any fields in the request message which are not bound by the path template
+// automatically become HTTP query parameters if there is no HTTP request body.
+// For example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+//	        get: "/v1/messages/{message_id}"
+// };
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // Mapped to URL path.
+// int64 revision = 2; // Mapped to URL query parameter `revision`.
+// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`.
+// }
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// patch: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by the
+// proto3 JSON encoding:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// patch: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP query parameters, as all fields not bound by the path end up in
+// the body. As a result, this option is rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// that don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+// This enables the following two alternative HTTP JSON to RPC mappings:
+//
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(message_id: "123456")`
+//
+// - HTTP: `GET /v1/users/me/messages/123456`
+// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// 1. Leaf request fields (recursively expanding nested messages in the
+// request message) are classified into three categories:
+// - Fields referred by the path template. They are passed via the URL path.
+// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They
+// are passed via the HTTP
+// request body.
+// - All other fields are passed via the URL query parameters, and the
+// parameter name is the field path in the request message. A repeated
+// field can be represented as multiple query parameters under the same
+// name.
+// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there are no URL
+// query parameters; all fields are passed via the URL path and the HTTP
+// request body.
+// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no
+// HTTP request body; all fields are passed via the URL path and URL query
+// parameters.
+//
+// # Path template syntax
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single URL path segment. The syntax `**` matches
+// zero or more URL path segments, which must be the last part of the URL path
+// except the `Verb`.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL`
+// contains any reserved character, such characters should be percent-encoded
+// before the matching.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path on the client
+// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The
+// server side does the reverse decoding. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{var}`.
+//
+// If a variable contains multiple path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path on the
+// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded.
+// The server side does the reverse decoding, except "%2F" and "%2f" are left
+// unchanged. Such variables show up in the
+// [Discovery
+// Document](https://developers.google.com/discovery/v1/reference/apis) as
+// `{+var}`.
+//
+// # Using gRPC API Service Configuration
+//
+// gRPC API Service Configuration (service config) is a configuration language
+// for configuring a gRPC service to become a user-facing product. The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// The following example selects a gRPC method and applies an `HttpRule` to it:
+//
+// http:
+// rules:
+// - selector: example.v1.Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// # Special notes
+//
+// When gRPC Transcoding is used to map gRPC methods to JSON REST endpoints, the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+type HttpRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Selects a method to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax
+ // details.
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+	// Determines the URL pattern that is matched by this rule. This pattern can be
+ // used with any of the {get|put|post|delete|patch} methods. A custom method
+ // can be defined using the 'custom' field.
+ //
+ // Types that are assignable to Pattern:
+ //
+ // *HttpRule_Get
+ // *HttpRule_Put
+ // *HttpRule_Post
+ // *HttpRule_Delete
+ // *HttpRule_Patch
+ // *HttpRule_Custom
+ Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
+ // The name of the request field whose value is mapped to the HTTP request
+ // body, or `*` for mapping all request fields not captured by the path
+	// pattern to the HTTP body, or omitted when there is no HTTP request body.
+ //
+ // NOTE: the referred field must be present at the top-level of the request
+ // message type.
+ Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"`
+ // Optional. The name of the response field whose value is mapped to the HTTP
+ // response body. When omitted, the entire response message will be used
+ // as the HTTP response body.
+ //
+ // NOTE: The referred field must be present at the top-level of the response
+ // message type.
+ ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"`
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"`
+}
+
+func (x *HttpRule) Reset() {
+ *x = HttpRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpRule) ProtoMessage() {}
+
+func (x *HttpRule) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead.
+func (*HttpRule) Descriptor() ([]byte, []int) {
+ return file_google_api_http_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HttpRule) GetSelector() string {
+ if x != nil {
+ return x.Selector
+ }
+ return ""
+}
+
+func (m *HttpRule) GetPattern() isHttpRule_Pattern {
+ if m != nil {
+ return m.Pattern
+ }
+ return nil
+}
+
+func (x *HttpRule) GetGet() string {
+ if x, ok := x.GetPattern().(*HttpRule_Get); ok {
+ return x.Get
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPut() string {
+ if x, ok := x.GetPattern().(*HttpRule_Put); ok {
+ return x.Put
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPost() string {
+ if x, ok := x.GetPattern().(*HttpRule_Post); ok {
+ return x.Post
+ }
+ return ""
+}
+
+func (x *HttpRule) GetDelete() string {
+ if x, ok := x.GetPattern().(*HttpRule_Delete); ok {
+ return x.Delete
+ }
+ return ""
+}
+
+func (x *HttpRule) GetPatch() string {
+ if x, ok := x.GetPattern().(*HttpRule_Patch); ok {
+ return x.Patch
+ }
+ return ""
+}
+
+func (x *HttpRule) GetCustom() *CustomHttpPattern {
+ if x, ok := x.GetPattern().(*HttpRule_Custom); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *HttpRule) GetBody() string {
+ if x != nil {
+ return x.Body
+ }
+ return ""
+}
+
+func (x *HttpRule) GetResponseBody() string {
+ if x != nil {
+ return x.ResponseBody
+ }
+ return ""
+}
+
+func (x *HttpRule) GetAdditionalBindings() []*HttpRule {
+ if x != nil {
+ return x.AdditionalBindings
+ }
+ return nil
+}
+
+type isHttpRule_Pattern interface {
+ isHttpRule_Pattern()
+}
+
+type HttpRule_Get struct {
+ // Maps to HTTP GET. Used for listing and getting information about
+ // resources.
+ Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"`
+}
+
+type HttpRule_Put struct {
+ // Maps to HTTP PUT. Used for replacing a resource.
+ Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"`
+}
+
+type HttpRule_Post struct {
+ // Maps to HTTP POST. Used for creating a resource or performing an action.
+ Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"`
+}
+
+type HttpRule_Delete struct {
+ // Maps to HTTP DELETE. Used for deleting a resource.
+ Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
+}
+
+type HttpRule_Patch struct {
+ // Maps to HTTP PATCH. Used for updating a resource.
+ Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"`
+}
+
+type HttpRule_Custom struct {
+ // The custom pattern is used for specifying an HTTP method that is not
+ // included in the `pattern` field, such as HEAD, or "*" to leave the
+ // HTTP method unspecified for this rule. The wild-card rule is useful
+ // for services that provide content to Web (HTML) clients.
+ Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+}
+
+func (*HttpRule_Get) isHttpRule_Pattern() {}
+
+func (*HttpRule_Put) isHttpRule_Pattern() {}
+
+func (*HttpRule_Post) isHttpRule_Pattern() {}
+
+func (*HttpRule_Delete) isHttpRule_Pattern() {}
+
+func (*HttpRule_Patch) isHttpRule_Pattern() {}
+
+func (*HttpRule_Custom) isHttpRule_Pattern() {}
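+
+// Illustrative sketch (not generated code): in Go, the pattern oneof is
+// populated by choosing exactly one wrapper type, mirroring the proto
+// annotation shown in the HttpRule documentation above:
+//
+//	rule := &HttpRule{
+//		Selector: "example.v1.Messaging.GetMessage",
+//		Pattern:  &HttpRule_Get{Get: "/v1/messages/{message_id}"},
+//	}
+//	path := rule.GetGet() // "/v1/messages/{message_id}"; GetPut() etc. return ""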
+
+// A custom pattern is used for defining a custom HTTP verb.
+type CustomHttpPattern struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of this custom HTTP verb.
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+ // The path matched by this custom verb.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *CustomHttpPattern) Reset() {
+ *x = CustomHttpPattern{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_http_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CustomHttpPattern) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomHttpPattern) ProtoMessage() {}
+
+func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_http_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead.
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) {
+ return file_google_api_http_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CustomHttpPattern) GetKind() string {
+ if x != nil {
+ return x.Kind
+ }
+ return ""
+}
+
+func (x *CustomHttpPattern) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+var File_google_api_http_proto protoreflect.FileDescriptor
+
+var file_google_api_http_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+ 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72,
+ 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65,
+ 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79,
+ 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda,
+ 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70,
+ 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12,
+ 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
+ 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50,
+ 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
+ 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x67, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
+ 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50,
+ 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_http_proto_rawDescOnce sync.Once
+ file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc
+)
+
+func file_google_api_http_proto_rawDescGZIP() []byte {
+ file_google_api_http_proto_rawDescOnce.Do(func() {
+ file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData)
+ })
+ return file_google_api_http_proto_rawDescData
+}
+
+var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_google_api_http_proto_goTypes = []interface{}{
+ (*Http)(nil), // 0: google.api.Http
+ (*HttpRule)(nil), // 1: google.api.HttpRule
+ (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern
+}
+var file_google_api_http_proto_depIdxs = []int32{
+ 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule
+ 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern
+ 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_google_api_http_proto_init() }
+func file_google_api_http_proto_init() {
+ if File_google_api_http_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CustomHttpPattern); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
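+	// Register the wrapper types of the `pattern` oneof so the runtime can
+	// marshal and unmarshal whichever variant is set. (Explanatory note; this
+	// comment is not emitted by protoc-gen-go.)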
+ file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*HttpRule_Get)(nil),
+ (*HttpRule_Put)(nil),
+ (*HttpRule_Post)(nil),
+ (*HttpRule_Delete)(nil),
+ (*HttpRule_Patch)(nil),
+ (*HttpRule_Custom)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_http_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_http_proto_goTypes,
+ DependencyIndexes: file_google_api_http_proto_depIdxs,
+ MessageInfos: file_google_api_http_proto_msgTypes,
+ }.Build()
+ File_google_api_http_proto = out.File
+ file_google_api_http_proto_rawDesc = nil
+ file_google_api_http_proto_goTypes = nil
+ file_google_api_http_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
new file mode 100644
index 000000000..a1c543a94
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -0,0 +1,659 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/resource.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A description of the historical or future-looking state of the
+// resource pattern.
+type ResourceDescriptor_History int32
+
+const (
+ // The "unset" value.
+ ResourceDescriptor_HISTORY_UNSPECIFIED ResourceDescriptor_History = 0
+ // The resource originally had one pattern and launched as such, and
+ // additional patterns were added later.
+ ResourceDescriptor_ORIGINALLY_SINGLE_PATTERN ResourceDescriptor_History = 1
+ // The resource has one pattern, but the API owner expects to add more
+ // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
+ // that from being necessary once there are multiple patterns.)
+ ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2
+)
+
+// Enum value maps for ResourceDescriptor_History.
+var (
+ ResourceDescriptor_History_name = map[int32]string{
+ 0: "HISTORY_UNSPECIFIED",
+ 1: "ORIGINALLY_SINGLE_PATTERN",
+ 2: "FUTURE_MULTI_PATTERN",
+ }
+ ResourceDescriptor_History_value = map[string]int32{
+ "HISTORY_UNSPECIFIED": 0,
+ "ORIGINALLY_SINGLE_PATTERN": 1,
+ "FUTURE_MULTI_PATTERN": 2,
+ }
+)
+
+func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History {
+ p := new(ResourceDescriptor_History)
+ *p = x
+ return p
+}
+
+func (x ResourceDescriptor_History) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_resource_proto_enumTypes[0].Descriptor()
+}
+
+func (ResourceDescriptor_History) Type() protoreflect.EnumType {
+ return &file_google_api_resource_proto_enumTypes[0]
+}
+
+func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceDescriptor_History.Descriptor instead.
+func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// A flag representing a specific style that a resource claims to conform to.
+type ResourceDescriptor_Style int32
+
+const (
+ // The unspecified value. Do not use.
+ ResourceDescriptor_STYLE_UNSPECIFIED ResourceDescriptor_Style = 0
+ // This resource is intended to be "declarative-friendly".
+ //
+ // Declarative-friendly resources must be more strictly consistent, and
+ // setting this to true communicates to tools that this resource should
+ // adhere to declarative-friendly expectations.
+ //
+ // Note: This is used by the API linter (linter.aip.dev) to enable
+ // additional checks.
+ ResourceDescriptor_DECLARATIVE_FRIENDLY ResourceDescriptor_Style = 1
+)
+
+// Enum value maps for ResourceDescriptor_Style.
+var (
+ ResourceDescriptor_Style_name = map[int32]string{
+ 0: "STYLE_UNSPECIFIED",
+ 1: "DECLARATIVE_FRIENDLY",
+ }
+ ResourceDescriptor_Style_value = map[string]int32{
+ "STYLE_UNSPECIFIED": 0,
+ "DECLARATIVE_FRIENDLY": 1,
+ }
+)
+
+func (x ResourceDescriptor_Style) Enum() *ResourceDescriptor_Style {
+ p := new(ResourceDescriptor_Style)
+ *p = x
+ return p
+}
+
+func (x ResourceDescriptor_Style) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ResourceDescriptor_Style) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_resource_proto_enumTypes[1].Descriptor()
+}
+
+func (ResourceDescriptor_Style) Type() protoreflect.EnumType {
+ return &file_google_api_resource_proto_enumTypes[1]
+}
+
+func (x ResourceDescriptor_Style) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceDescriptor_Style.Descriptor instead.
+func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0, 1}
+}
+
+// A simple descriptor of a resource type.
+//
+// ResourceDescriptor annotates a resource message (either by means of a
+// protobuf annotation or use in the service config), and associates the
+// resource's schema, the resource type, and the pattern of the resource name.
+//
+// Example:
+//
+// message Topic {
+// // Indicates this message defines a resource schema.
+// // Declares the resource type in the format of {service}/{kind}.
+// // For Kubernetes resources, the format is {api group}/{kind}.
+// option (google.api.resource) = {
+// type: "pubsub.googleapis.com/Topic"
+// pattern: "projects/{project}/topics/{topic}"
+// };
+// }
+//
+// The ResourceDescriptor Yaml config will look like:
+//
+// resources:
+// - type: "pubsub.googleapis.com/Topic"
+// pattern: "projects/{project}/topics/{topic}"
+//
+// Sometimes, resources have multiple patterns, typically because they can
+// live under multiple parents.
+//
+// Example:
+//
+// message LogEntry {
+// option (google.api.resource) = {
+// type: "logging.googleapis.com/LogEntry"
+// pattern: "projects/{project}/logs/{log}"
+// pattern: "folders/{folder}/logs/{log}"
+// pattern: "organizations/{organization}/logs/{log}"
+// pattern: "billingAccounts/{billing_account}/logs/{log}"
+// };
+// }
+//
+// The ResourceDescriptor Yaml config will look like:
+//
+// resources:
+// - type: 'logging.googleapis.com/LogEntry'
+// pattern: "projects/{project}/logs/{log}"
+// pattern: "folders/{folder}/logs/{log}"
+// pattern: "organizations/{organization}/logs/{log}"
+// pattern: "billingAccounts/{billing_account}/logs/{log}"
+type ResourceDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource type. It must be in the format of
+ // {service_name}/{resource_type_kind}. The `resource_type_kind` must be
+ // singular and must not include version numbers.
+ //
+ // Example: `storage.googleapis.com/Bucket`
+ //
+ // The value of the resource_type_kind must follow the regular expression
+ // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
+ // should use PascalCase (UpperCamelCase). The maximum number of
+ // characters allowed for the `resource_type_kind` is 100.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Optional. The relative resource name pattern associated with this resource
+ // type. The DNS prefix of the full resource name shouldn't be specified here.
+ //
+ // The path pattern must follow the syntax, which aligns with HTTP binding
+ // syntax:
+ //
+ // Template = Segment { "/" Segment } ;
+ // Segment = LITERAL | Variable ;
+ // Variable = "{" LITERAL "}" ;
+ //
+ // Examples:
+ //
+ // - "projects/{project}/topics/{topic}"
+ // - "projects/{project}/knowledgeBases/{knowledge_base}"
+ //
+ // The components in braces correspond to the IDs for each resource in the
+ // hierarchy. It is expected that, if multiple patterns are provided,
+ // the same component name (e.g. "project") refers to IDs of the same
+ // type of resource.
+ Pattern []string `protobuf:"bytes,2,rep,name=pattern,proto3" json:"pattern,omitempty"`
+ // Optional. The field on the resource that designates the resource name
+ // field. If omitted, this is assumed to be "name".
+ NameField string `protobuf:"bytes,3,opt,name=name_field,json=nameField,proto3" json:"name_field,omitempty"`
+ // Optional. The historical or future-looking state of the resource pattern.
+ //
+ // Example:
+ //
+ // // The InspectTemplate message originally only supported resource
+ // // names with organization, and project was added later.
+ // message InspectTemplate {
+ // option (google.api.resource) = {
+ // type: "dlp.googleapis.com/InspectTemplate"
+ // pattern:
+ // "organizations/{organization}/inspectTemplates/{inspect_template}"
+ // pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+ // history: ORIGINALLY_SINGLE_PATTERN
+ // };
+ // }
+ History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
+ // The plural name used in the resource name and permission names, such as
+ // 'projects' for the resource name of 'projects/{project}' and the permission
+ // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
+ // to this is for Nested Collections that have stuttering names, as defined
+ // in [AIP-122](https://google.aip.dev/122#nested-collections), where the
+ // collection ID in the resource name pattern does not necessarily directly
+ // match the `plural` value.
+ //
+	// It is the same concept as the `plural` field in the k8s CRD spec
+ // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+ //
+ // Note: The plural form is required even for singleton resources. See
+ // https://aip.dev/156
+ Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"`
+	// The same concept as the `singular` field in the k8s CRD spec
+	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+	// For example, "project" for the `resourcemanager.googleapis.com/Project` type.
+ Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"`
+ // Style flag(s) for this resource.
+ // These indicate that a resource is expected to conform to a given
+ // style. See the specific style flags for additional information.
+ Style []ResourceDescriptor_Style `protobuf:"varint,10,rep,packed,name=style,proto3,enum=google.api.ResourceDescriptor_Style" json:"style,omitempty"`
+}
+
+func (x *ResourceDescriptor) Reset() {
+ *x = ResourceDescriptor{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceDescriptor) ProtoMessage() {}
+
+func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead.
+func (*ResourceDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_api_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceDescriptor) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetPattern() []string {
+ if x != nil {
+ return x.Pattern
+ }
+ return nil
+}
+
+func (x *ResourceDescriptor) GetNameField() string {
+ if x != nil {
+ return x.NameField
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History {
+ if x != nil {
+ return x.History
+ }
+ return ResourceDescriptor_HISTORY_UNSPECIFIED
+}
+
+func (x *ResourceDescriptor) GetPlural() string {
+ if x != nil {
+ return x.Plural
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetSingular() string {
+ if x != nil {
+ return x.Singular
+ }
+ return ""
+}
+
+func (x *ResourceDescriptor) GetStyle() []ResourceDescriptor_Style {
+ if x != nil {
+ return x.Style
+ }
+ return nil
+}
+
+// Defines a proto annotation that describes a string field that refers to
+// an API resource.
+type ResourceReference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource type that the annotated field references.
+ //
+ // Example:
+ //
+ // message Subscription {
+ // string topic = 2 [(google.api.resource_reference) = {
+ // type: "pubsub.googleapis.com/Topic"
+ // }];
+ // }
+ //
+ // Occasionally, a field may reference an arbitrary resource. In this case,
+ // APIs use the special value * in their resource reference.
+ //
+ // Example:
+ //
+ // message GetIamPolicyRequest {
+ // string resource = 2 [(google.api.resource_reference) = {
+ // type: "*"
+ // }];
+ // }
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The resource type of a child collection that the annotated field
+ // references. This is useful for annotating the `parent` field that
+ // doesn't have a fixed resource type.
+ //
+ // Example:
+ //
+ // message ListLogEntriesRequest {
+ // string parent = 1 [(google.api.resource_reference) = {
+ // child_type: "logging.googleapis.com/LogEntry"
+ // }];
+ // }
+ ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"`
+}
+
+func (x *ResourceReference) Reset() {
+ *x = ResourceReference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_resource_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceReference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceReference) ProtoMessage() {}
+
+func (x *ResourceReference) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_resource_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
+func (*ResourceReference) Descriptor() ([]byte, []int) {
+ return file_google_api_resource_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceReference) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *ResourceReference) GetChildType() string {
+ if x != nil {
+ return x.ChildType
+ }
+ return ""
+}
+
+var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*ResourceReference)(nil),
+ Field: 1055,
+ Name: "google.api.resource_reference",
+ Tag: "bytes,1055,opt,name=resource_reference",
+ Filename: "google/api/resource.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: ([]*ResourceDescriptor)(nil),
+ Field: 1053,
+ Name: "google.api.resource_definition",
+ Tag: "bytes,1053,rep,name=resource_definition",
+ Filename: "google/api/resource.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*ResourceDescriptor)(nil),
+ Field: 1053,
+ Name: "google.api.resource",
+ Tag: "bytes,1053,opt,name=resource",
+ Filename: "google/api/resource.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // An annotation that describes a resource reference, see
+ // [ResourceReference][].
+ //
+ // optional google.api.ResourceReference resource_reference = 1055;
+ E_ResourceReference = &file_google_api_resource_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // An annotation that describes a resource definition without a corresponding
+ // message; see [ResourceDescriptor][].
+ //
+ // repeated google.api.ResourceDescriptor resource_definition = 1053;
+ E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // An annotation that describes a resource definition, see
+ // [ResourceDescriptor][].
+ //
+ // optional google.api.ResourceDescriptor resource = 1053;
+ E_Resource = &file_google_api_resource_proto_extTypes[2]
+)
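+
+// A minimal consumer-side sketch of reading these annotations at runtime.
+// Assumptions (not part of this generated file): the caller imports
+// "google.golang.org/protobuf/proto", refers to this package as
+// `annotations`, and `md` is some protoreflect.MessageDescriptor.
+//
+//	opts := md.Options().(*descriptorpb.MessageOptions)
+//	if proto.HasExtension(opts, annotations.E_Resource) {
+//		res := proto.GetExtension(opts, annotations.E_Resource).(*annotations.ResourceDescriptor)
+//		_ = res.GetType() // e.g. "pubsub.googleapis.com/Topic"
+//	}
+//
+// The same pattern reads E_ResourceReference from FieldOptions and
+// E_ResourceDefinition from FileOptions.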
+
+var File_google_api_resource_proto protoreflect.FileDescriptor
+
+var file_google_api_resource_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x03, 0x0a, 0x12, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a,
+ 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12,
+ 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
+ 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75,
+ 0x6c, 0x61, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x22,
+ 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x13, 0x48, 0x49,
+ 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, 0x4e, 0x41, 0x4c, 0x4c,
+ 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e,
+ 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4d, 0x55, 0x4c,
+ 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x05,
+ 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14,
+ 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x46, 0x52, 0x49, 0x45,
+ 0x4e, 0x44, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c,
+ 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6b, 0x0a, 0x0e, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
+ 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_resource_proto_rawDescOnce sync.Once
+ file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc
+)
+
+func file_google_api_resource_proto_rawDescGZIP() []byte {
+ file_google_api_resource_proto_rawDescOnce.Do(func() {
+ file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData)
+ })
+ return file_google_api_resource_proto_rawDescData
+}
+
+var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_resource_proto_goTypes = []interface{}{
+ (ResourceDescriptor_History)(0), // 0: google.api.ResourceDescriptor.History
+ (ResourceDescriptor_Style)(0), // 1: google.api.ResourceDescriptor.Style
+ (*ResourceDescriptor)(nil), // 2: google.api.ResourceDescriptor
+ (*ResourceReference)(nil), // 3: google.api.ResourceReference
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions
+ (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions
+}
+var file_google_api_resource_proto_depIdxs = []int32{
+ 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History
+ 1, // 1: google.api.ResourceDescriptor.style:type_name -> google.api.ResourceDescriptor.Style
+ 4, // 2: google.api.resource_reference:extendee -> google.protobuf.FieldOptions
+ 5, // 3: google.api.resource_definition:extendee -> google.protobuf.FileOptions
+ 6, // 4: google.api.resource:extendee -> google.protobuf.MessageOptions
+ 3, // 5: google.api.resource_reference:type_name -> google.api.ResourceReference
+ 2, // 6: google.api.resource_definition:type_name -> google.api.ResourceDescriptor
+ 2, // 7: google.api.resource:type_name -> google.api.ResourceDescriptor
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 5, // [5:8] is the sub-list for extension type_name
+ 2, // [2:5] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_resource_proto_init() }
+func file_google_api_resource_proto_init() {
+ if File_google_api_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceDescriptor); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceReference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_resource_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 2,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_resource_proto_goTypes,
+ DependencyIndexes: file_google_api_resource_proto_depIdxs,
+ EnumInfos: file_google_api_resource_proto_enumTypes,
+ MessageInfos: file_google_api_resource_proto_msgTypes,
+ ExtensionInfos: file_google_api_resource_proto_extTypes,
+ }.Build()
+ File_google_api_resource_proto = out.File
+ file_google_api_resource_proto_rawDesc = nil
+ file_google_api_resource_proto_goTypes = nil
+ file_google_api_resource_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
new file mode 100644
index 000000000..2b54db304
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
@@ -0,0 +1,693 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/routing.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies the routing information that should be sent along with the request
+// in the form of a routing header.
+// **NOTE:** All service configuration rules follow the "last one wins" order.
+//
+// The examples below will apply to an RPC which has the following request type:
+//
+// Message Definition:
+//
+// message Request {
+// // The name of the Table
+// // Values can be of the following formats:
+// // - `projects/<project>/tables/<table>`