diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9e67cd25c..156ae32e6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -151,7 +151,7 @@ Please follow this style to make the operator-controller project easier to revie
 ### Go version
 
-Our goal is to minimize disruption by requiring the lowest possible Go language version. This means avoiding updaties to the go version specified in [go.mod](go.mod) (and other locations).
+Our goal is to minimize disruption by requiring the lowest possible Go language version. This means avoiding updates to the go version specified in the project's `go.mod` file (and other locations).
 
 There is a GitHub PR CI job named `go-verdiff` that will inform a PR author if the Go language version has been updated. It is not a required test, but failures should prompt authors and reviewers to have a discussion with the community about the Go language version change.
diff --git a/Makefile b/Makefile
index 08d43d6dd..d433b24b9 100644
--- a/Makefile
+++ b/Makefile
@@ -483,7 +483,7 @@ VENVDIR := $(abspath docs/.venv)
 .PHONY: build-docs
 build-docs: venv
 	. $(VENV)/activate; \
-	mkdocs build
+	mkdocs build --strict
 
 .PHONY: serve-docs
 serve-docs: venv
@@ -493,7 +493,7 @@ serve-docs: venv
 .PHONY: deploy-docs
 deploy-docs: venv
 	. $(VENV)/activate; \
-	mkdocs gh-deploy --force
+	mkdocs gh-deploy --force --strict
 
 # The demo script requires to install asciinema with: brew install asciinema to run on mac os envs.
 # Please ensure that all demos are named with the demo name and the suffix -demo-script.sh
diff --git a/README.md b/README.md
index 783276d9b..4be7f30d0 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,7 @@
+[![unit-test](https://github.com/operator-framework/operator-controller/actions/workflows/unit-test.yaml/badge.svg)](https://github.com/operator-framework/operator-controller/actions/workflows/unit-test.yaml)
+[![e2e](https://github.com/operator-framework/operator-controller/actions/workflows/e2e.yaml/badge.svg)](https://github.com/operator-framework/operator-controller/actions/workflows/e2e.yaml)
+[![codecov](https://codecov.io/gh/operator-framework/operator-controller/graph/badge.svg?token=5f34zaWaN7)](https://codecov.io/gh/operator-framework/operator-controller)
+
 # operator-controller
 
 The operator-controller is the central component of Operator Lifecycle Manager (OLM) v1. It extends Kubernetes with an API through which users can install extensions.
diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go
index 0141f1a7a..a2dd890c3 100644
--- a/api/v1/clusterextension_types.go
+++ b/api/v1/clusterextension_types.go
@@ -17,6 +17,7 @@ limitations under the License.
 package v1
 
 import (
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -25,6 +26,8 @@ var ClusterExtensionKind = "ClusterExtension"
 type (
 	UpgradeConstraintPolicy     string
 	CRDUpgradeSafetyEnforcement string
+
+	ClusterExtensionConfigType string
 )
 
 const (
@@ -39,6 +42,8 @@ const (
 	// Use with caution as this can lead to unknown and potentially
 	// disastrous results such as data loss.
 	UpgradeConstraintPolicySelfCertified UpgradeConstraintPolicy = "SelfCertified"
+
+	ClusterExtensionConfigTypeInline ClusterExtensionConfigType = "Inline"
 )
 
 // ClusterExtensionSpec defines the desired state of ClusterExtension
@@ -92,6 +97,15 @@ type ClusterExtensionSpec struct {
 	//
 	// +optional
 	Install *ClusterExtensionInstallConfig `json:"install,omitempty"`
+
+	// config contains optional configuration values applied during rendering of the
+	// ClusterExtension's manifests. Values can be specified inline.
+	//
+	// config is optional. When not specified, the default configuration of the resolved bundle will be used.
+	//
+	//
+	// +optional
+	Config *ClusterExtensionConfig `json:"config,omitempty"`
 }
 
 const SourceTypeCatalog = "Catalog"
@@ -138,6 +152,34 @@ type ClusterExtensionInstallConfig struct {
 	Preflight *PreflightConfig `json:"preflight,omitempty"`
 }
 
+// ClusterExtensionConfig is a discriminated union which selects the source configuration values to be merged into
+// the ClusterExtension's rendered manifests.
+//
+// +kubebuilder:validation:XValidation:rule="has(self.configType) && self.configType == 'Inline' ?has(self.inline) : !has(self.inline)",message="inline is required when configType is Inline, and forbidden otherwise"
+// +union
+type ClusterExtensionConfig struct {
+	// configType is a required reference to the type of configuration source.
+	//
+	// Allowed values are "Inline"
+	//
+	// When this field is set to "Inline", the cluster extension configuration is defined inline within the
+	// ClusterExtension resource.
+	//
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:="Inline"
+	// +kubebuilder:validation:Required
+	ConfigType ClusterExtensionConfigType `json:"configType"`
+
+	// inline contains JSON or YAML values specified directly in the
+	// ClusterExtension.
+	//
+	// inline must be set if configType is 'Inline'.
+	//
+	// +kubebuilder:validation:Type=object
+	// +optional
+	Inline *apiextensionsv1.JSON `json:"inline,omitempty"`
+}
+
 // CatalogFilter defines the attributes used to identify and filter content from a catalog.
 type CatalogFilter struct {
 	// packageName is a reference to the name of the package to be installed
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 23fcf7d85..01ad99562 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -252,6 +252,25 @@ func (in *ClusterExtension) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterExtensionConfig) DeepCopyInto(out *ClusterExtensionConfig) {
+	*out = *in
+	if in.Inline != nil {
+		in, out := &in.Inline, &out.Inline
+		*out = (*in).DeepCopy()
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterExtensionConfig.
+func (in *ClusterExtensionConfig) DeepCopy() *ClusterExtensionConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterExtensionConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterExtensionInstallConfig) DeepCopyInto(out *ClusterExtensionInstallConfig) {
 	*out = *in
@@ -330,6 +349,11 @@ func (in *ClusterExtensionSpec) DeepCopyInto(out *ClusterExtensionSpec) {
 		*out = new(ClusterExtensionInstallConfig)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(ClusterExtensionConfig)
+		(*in).DeepCopyInto(*out)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterExtensionSpec.
diff --git a/commitchecker.yaml b/commitchecker.yaml
index 2a1f38a43..669955b89 100644
--- a/commitchecker.yaml
+++ b/commitchecker.yaml
@@ -1,4 +1,4 @@
-expectedMergeBase: 07bd008eaadcebf79415cb360205c73b420666e1
+expectedMergeBase: b4aeb921ef4cdfd7d4ae49da78ee62c313e8bbac
 upstreamBranch: main
 upstreamOrg: operator-framework
 upstreamRepo: operator-controller
diff --git a/config/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/config/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
index 162683603..ac24fe1b6 100644
--- a/config/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
+++ b/config/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml
@@ -57,6 +57,40 @@ spec:
               description: spec is an optional field that defines the desired state
                 of the ClusterExtension.
               properties:
+                config:
+                  description: |-
+                    config contains optional configuration values applied during rendering of the
+                    ClusterExtension's manifests. Values can be specified inline.
+
+                    config is optional. When not specified, the default configuration of the resolved bundle will be used.
+                  properties:
+                    configType:
+                      description: |-
+                        configType is a required reference to the type of configuration source.
+
+                        Allowed values are "Inline"
+
+                        When this field is set to "Inline", the cluster extension configuration is defined inline within the
+                        ClusterExtension resource.
+                      enum:
+                      - Inline
+                      type: string
+                    inline:
+                      description: |-
+                        inline contains JSON or YAML values specified directly in the
+                        ClusterExtension.
+
+                        inline must be set if configType is 'Inline'.
+                      type: object
+                      x-kubernetes-preserve-unknown-fields: true
+                  required:
+                  - configType
+                  type: object
+                  x-kubernetes-validations:
+                  - message: inline is required when configType is Inline, and forbidden
+                      otherwise
+                    rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline)
+                      : !has(self.inline)'
                 install:
                   description: |-
                     install is an optional field used to configure the installation options
diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md
index 84fdbfa64..8aaa37e84 100644
--- a/docs/api-reference/olmv1-api-reference.md
+++ b/docs/api-reference/olmv1-api-reference.md
@@ -239,6 +239,40 @@ _Appears in:_
 | `status` _[ClusterExtensionStatus](#clusterextensionstatus)_ | status is an optional field that defines the observed state of the ClusterExtension. |  |  |
 
 
+#### ClusterExtensionConfig
+
+
+
+ClusterExtensionConfig is a discriminated union which selects the source configuration values to be merged into
+the ClusterExtension's rendered manifests.
+
+
+
+_Appears in:_
+- [ClusterExtensionSpec](#clusterextensionspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is a required reference to the type of configuration source.<br /><br />Allowed values are "Inline"<br /><br />When this field is set to "Inline", the cluster extension configuration is defined inline within the<br />ClusterExtension resource. |  | Enum: [Inline] <br />Required: \{\} <br /> |
+| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the<br />ClusterExtension.<br /><br />inline must be set if configType is 'Inline'. |  | Type: object <br /> |
+
+
+#### ClusterExtensionConfigType
+
+_Underlying type:_ _string_
+
+
+
+
+
+_Appears in:_
+- [ClusterExtensionConfig](#clusterextensionconfig)
+
+| Field | Description |
+| --- | --- |
+| `Inline` |  |
+
+
 #### ClusterExtensionInstallConfig
@@ -309,6 +343,7 @@ _Appears in:_
 | `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a reference to a ServiceAccount used to perform all interactions<br />with the cluster that are required to manage the extension.<br />The ServiceAccount must be configured with the necessary permissions to perform these interactions.<br />The ServiceAccount must exist in the namespace referenced in the spec.<br />serviceAccount is required. |  | Required: \{\} <br /> |
 | `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content<br />for this ClusterExtension. Selection is performed by setting the sourceType.<br /><br />Catalog is currently the only implemented sourceType, and setting the<br />sourcetype to "Catalog" requires the catalog field to also be defined.<br /><br />Below is a minimal example of a source definition (in yaml):<br /><br />source:<br />  sourceType: Catalog<br />  catalog:<br />    packageName: example-package |  | Required: \{\} <br /> |
 | `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is an optional field used to configure the installation options<br />for the ClusterExtension such as the pre-flight check configuration. |  |  |
+| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config contains optional configuration values applied during rendering of the<br />ClusterExtension's manifests. Values can be specified inline.<br /><br />config is optional. When not specified, the default configuration of the resolved bundle will be used.<br /><br /> |  |  |
 
 
 #### ClusterExtensionStatus
diff --git a/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md b/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md
index 6b27ba27e..eb70149da 100644
--- a/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md
+++ b/docs/draft/api-reference/catalogd-webserver-metas-endpoint.md
@@ -31,7 +31,7 @@ As an example, to access only the [package schema](https://olm.operatorframework
 
 the URL to access the service would be `https://catalogd-service.olmv1-system.svc/catalogs/operatorhubio/api/v1/metas?schema=olm.package`
 
-For more examples of valid queries that can be made to the `api/v1/metas` service endpoint, please see [Catalog Queries](../howto/catalog-queries.md).
+For more examples of valid queries that can be made to the `api/v1/metas` service endpoint, please see [Catalog Queries](../../howto/catalog-queries.md).
 
 !!! note
diff --git a/docs/draft/howto/catalog-queries-metas-endpoint.md b/docs/draft/howto/catalog-queries-metas-endpoint.md
index f723d504b..25b45a7a4 100644
--- a/docs/draft/howto/catalog-queries-metas-endpoint.md
+++ b/docs/draft/howto/catalog-queries-metas-endpoint.md
@@ -1,6 +1,6 @@
 # Catalog queries
 
-After you [add a catalog of extensions](../tutorials/add-catalog.md) to your cluster, you must port forward your catalog as a service.
+After you [add a catalog of extensions](../../tutorials/add-catalog.md) to your cluster, you must port forward your catalog as a service.
 Then you can query the catalog by using `curl` commands and the `jq` CLI tool to find extensions to install.
 
 ## Prerequisites
diff --git a/docs/draft/howto/single-ownnamespace-install.md b/docs/draft/howto/single-ownnamespace-install.md
index 2f440f32d..866bf1da5 100644
--- a/docs/draft/howto/single-ownnamespace-install.md
+++ b/docs/draft/howto/single-ownnamespace-install.md
@@ -56,11 +56,11 @@ kubectl rollout status -n olmv1-system deployment/operator-controller-controller
 ## Configuring the `ClusterExtension`
 
 A `ClusterExtension` can be configured to install bundle in `Single-` or `OwnNamespace` mode through the
-`olm.operatorframework.io/watch-namespace: <watch-namespace>` annotation. The *installMode* is inferred in the following way:
+`.spec.config.inline.watchNamespace` property. The *installMode* is inferred in the following way:
 
- - *AllNamespaces*: `<watch-namespace>` is empty, or the annotation is not present
- - *OwnNamespace*: `<watch-namespace>` is the install namespace (i.e. `.spec.namespace`)
- - *SingleNamespace*: `<watch-namespace>` not the install namespace
+ - *AllNamespaces*: `watchNamespace` is empty, or not set
+ - *OwnNamespace*: `watchNamespace` is the install namespace (i.e. `.spec.namespace`)
+ - *SingleNamespace*: `watchNamespace` is *not* the install namespace
 
 ### Examples
 
@@ -70,12 +70,13 @@
 apiVersion: olm.operatorframework.io/v1
 kind: ClusterExtension
 metadata:
   name: argocd
-  annotations:
-    olm.operatorframework.io/watch-namespace: argocd-watch
 spec:
   namespace: argocd
   serviceAccount:
     name: argocd-installer
+  config:
+    inline:
+      watchNamespace: argocd-watch
   source:
     sourceType: Catalog
     catalog:
@@ -96,6 +97,9 @@ spec:
   namespace: argocd
   serviceAccount:
     name: argocd-installer
+  config:
+    inline:
+      watchNamespace: argocd
   source:
     sourceType: Catalog
     catalog:
diff --git a/docs/draft/tutorials/explore-available-content-metas-endpoint.md b/docs/draft/tutorials/explore-available-content-metas-endpoint.md
index 8ece0a75d..70cb87424 100644
--- a/docs/draft/tutorials/explore-available-content-metas-endpoint.md
+++ b/docs/draft/tutorials/explore-available-content-metas-endpoint.md
@@ -5,7 +5,7 @@ hide:
 
 # Explore Available Content
 
-After you [add a catalog of extensions](add-catalog.md) to your cluster, you must port forward your catalog as a service.
+After you [add a catalog of extensions](../../tutorials/add-catalog.md) to your cluster, you must port forward your catalog as a service.
 Then you can query the catalog by using `curl` commands and the `jq` CLI tool to find extensions to install.
 
 ## Prerequisites
@@ -144,4 +144,4 @@ Then you can query the catalog by using `curl` commands and the `jq` CLI tool to
 
 ### Additional resources
 
-* [Catalog queries](../howto/catalog-queries.md)
+* [Catalog queries](../../howto/catalog-queries.md)
diff --git a/go.mod b/go.mod
index 7f385942d..33751eb4e 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/blang/semver/v4 v4.0.0
 	github.com/cert-manager/cert-manager v1.18.2
 	github.com/containerd/containerd v1.7.28
-	github.com/containers/image/v5 v5.36.1
+	github.com/containers/image/v5 v5.36.2
 	github.com/fsnotify/fsnotify v1.9.0
 	github.com/go-logr/logr v1.4.3
 	github.com/golang-jwt/jwt/v5 v5.3.0
@@ -19,32 +19,32 @@ require (
 	github.com/klauspost/compress v1.18.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.1
-	github.com/operator-framework/api v0.33.0
+	github.com/operator-framework/api v0.34.0
 	github.com/operator-framework/helm-operator-plugins v0.8.0
-	github.com/operator-framework/operator-registry v1.56.0
-	github.com/prometheus/client_golang v1.23.0
-	github.com/prometheus/common v0.65.0
-	github.com/spf13/cobra v1.9.1
-	github.com/stretchr/testify v1.10.0
+	github.com/operator-framework/operator-registry v1.57.0
+	github.com/prometheus/client_golang v1.23.1
+	github.com/prometheus/common v0.66.0
+	github.com/spf13/cobra v1.10.1
+	github.com/stretchr/testify v1.11.1
 	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
 	golang.org/x/mod v0.27.0
 	golang.org/x/sync v0.16.0
 	golang.org/x/tools v0.36.0
 	gopkg.in/yaml.v2 v2.4.0
 	helm.sh/helm/v3 v3.18.6
-	k8s.io/api v0.33.3
-	k8s.io/apiextensions-apiserver v0.33.3
-	k8s.io/apimachinery v0.33.3
-	k8s.io/apiserver v0.33.3
+	k8s.io/api v0.33.4
+	k8s.io/apiextensions-apiserver v0.33.4
+	k8s.io/apimachinery v0.33.4
+	k8s.io/apiserver v0.33.4
 	k8s.io/cli-runtime v0.33.3
-	k8s.io/client-go v0.33.3
-	k8s.io/component-base v0.33.3
+	k8s.io/client-go v0.33.4
+	k8s.io/component-base v0.33.4
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kubernetes v1.33.2
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
 	sigs.k8s.io/controller-runtime v0.21.0
 	sigs.k8s.io/controller-tools v0.18.0
-	sigs.k8s.io/crdify v0.4.1-0.20250613143457-398e4483fb58
+	sigs.k8s.io/crdify v0.5.0
sigs.k8s.io/yaml v1.6.0 ) @@ -82,7 +82,7 @@ require ( github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.63.1 // indirect + github.com/containers/common v0.64.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect github.com/containers/storage v1.59.1 // indirect @@ -90,7 +90,7 @@ require ( github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v28.3.2+incompatible // indirect + github.com/docker/cli v28.3.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect @@ -108,7 +108,7 @@ require ( github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.16.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect @@ -126,6 +126,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/filetype v1.1.3 // indirect @@ -149,7 +150,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.28 // indirect + github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -168,7 +169,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.37.0 // indirect + github.com/onsi/gomega v1.38.2 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/operator-framework/operator-lib v0.17.0 // indirect github.com/otiai10/copy v1.14.1 // indirect @@ -191,29 +192,29 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.1 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.14 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect 
github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/bbolt v1.4.2 // indirect + go.etcd.io/bbolt v1.4.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.41.0 // indirect golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect @@ -224,10 +225,10 @@ require ( golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index f66d08458..285351c7d 100644 --- a/go.sum +++ b/go.sum @@ -77,10 +77,10 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.63.1 h1:6g02gbW34PaRVH4Heb2Pk11x0SdbQ+8AfeKKeQGqYBE= -github.com/containers/common v0.63.1/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= -github.com/containers/image/v5 v5.36.1 h1:6zpXBqR59UcAzoKpa/By5XekeqFV+htWYfr65+Cgjqo= -github.com/containers/image/v5 v5.36.1/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= +github.com/containers/common v0.64.1 h1:E8vSiL+B84/UCsyVSb70GoxY9cu+0bseLujm4EKF6GE= +github.com/containers/common v0.64.1/go.mod h1:CtfQNHoCAZqWeXMwdShcsxmMJSeGRgKKMqAwRKmWrHE= +github.com/containers/image/v5 v5.36.2 h1:GcxYQyAHRF/pLqR4p4RpvKllnNL8mOBn0eZnqJbfTwk= +github.com/containers/image/v5 v5.36.2/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 
h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= @@ -110,8 +110,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v28.3.2+incompatible h1:mOt9fcLE7zaACbxW1GeS65RI67wIJrTnqS3hP2huFsY= -github.com/docker/cli v28.3.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo= +github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= @@ -160,8 +160,8 @@ github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77 github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= -github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= -github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -231,8 +231,8 @@ github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -248,6 +248,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= 
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= @@ -317,8 +319,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= -github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -364,24 +366,24 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/api v0.33.0 h1:Tdu9doXz6Key2riIiP3/JPahHEgFBXAqyWQN4kOITS8= -github.com/operator-framework/api v0.33.0/go.mod h1:sEh1VqwQCJUj+l/rKNWPDEJdFNAbdTu8QcM+x+wdYYo= +github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= +github.com/operator-framework/api v0.34.0/go.mod h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= 
github.com/operator-framework/helm-operator-plugins v0.8.0 h1:0f6HOQC5likkf0b/OvGvw7nhDb6h8Cj5twdCNjwNzMc= github.com/operator-framework/helm-operator-plugins v0.8.0/go.mod h1:Sc+8bE38xTCgCChBUvtq/PxatEg9fAypr7S5iAw8nlA= github.com/operator-framework/operator-lib v0.17.0 h1:cbz51wZ9+GpWR1ZYP4CSKSSBxDlWxmmnseaHVZZjZt4= github.com/operator-framework/operator-lib v0.17.0/go.mod h1:TGopBxIE8L6E/Cojzo26R3NFp1eNlqhQNmzqhOblaLw= -github.com/operator-framework/operator-registry v1.56.0 h1:vbTyee/gahpnh7qw1hV1osnWy9YpTjIbEuHpwIdoEUs= -github.com/operator-framework/operator-registry v1.56.0/go.mod h1:NOmQyrgOGW0cwUxHG5ZqKxdObOzQNmO4Rxcf7JC32FU= +github.com/operator-framework/operator-registry v1.57.0 h1:mQ4c8A8VUxZPJ0QCFRNio+7JEsLX6eKxlDSl6ORCRdk= +github.com/operator-framework/operator-registry v1.57.0/go.mod h1:9rAZH/LZ/ttEuTvL1D4KApGqOtRDE6fJzzOrJNcBu7g= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -399,13 +401,13 @@ github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.1 h1:w6gXMLQGgd0jXXlote9lRHMe0nG01EbnJT+C0EJru2Y= +github.com/prometheus/client_golang v1.23.1/go.mod h1:br8j//v2eg2K5Vvna5klK8Ku5pcU5r4ll73v6ik5dIQ= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.0 h1:K/rJPHrG3+AoQs50r2+0t7zMnMzek2Vbv31OFVsMeVY= +github.com/prometheus/common v0.66.0/go.mod h1:Ux6NtV1B4LatamKE63tJBntoxD++xmtI/lK0VtEplN4= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 h1:uTiEyEyfLhkw678n6EulHVto8AkcXVr8zUcBJNZ0ark= @@ -443,11 +445,10 @@ github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= @@ -464,12 +465,12 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= +github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= @@ -481,8 +482,8 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= +go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= +go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= @@ -501,8 +502,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= @@ -527,16 +528,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwW go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -549,8 +550,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= 
-go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -678,6 +679,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -685,17 +688,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.75.0 
h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -705,8 +708,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -765,8 +768,8 @@ sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytI sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= -sigs.k8s.io/crdify v0.4.1-0.20250613143457-398e4483fb58 h1:VTvhbqgZMVoDpHHPuZLaOgzjjsJBhO8+vDKA1COuLCY= -sigs.k8s.io/crdify v0.4.1-0.20250613143457-398e4483fb58/go.mod h1:ZIFxaYNgKYmFtZCLPysncXQ8oqwnNlHQbRUfxJHZwzU= +sigs.k8s.io/crdify v0.5.0 h1:mrMH9CgXQPTZUpTU6Klqfnlys8bggv/7uvLT2lXSP7A= +sigs.k8s.io/crdify v0.5.0/go.mod h1:ZIFxaYNgKYmFtZCLPysncXQ8oqwnNlHQbRUfxJHZwzU= sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= diff --git a/internal/operator-controller/applier/helm.go b/internal/operator-controller/applier/helm.go index ecfb3fdc2..e34ad2b28 100644 --- a/internal/operator-controller/applier/helm.go +++ b/internal/operator-controller/applier/helm.go @@ -27,6 +27,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/features" + "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle/source" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util" @@ -57,7 +58,7 @@ type Preflight interface { } type 
BundleToHelmChartConverter interface {
-	ToHelmChart(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error)
+	ToHelmChart(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error)
 }
 
 type Helm struct {
@@ -222,7 +223,10 @@ func (h *Helm) buildHelmChart(bundleFS fs.FS, ext *ocv1.ClusterExtension) (*char
 		}
 	}
 
-	return h.BundleToHelmChartConverter.ToHelmChart(source.FromFS(bundleFS), ext.Spec.Namespace, watchNamespace)
+	bundleConfig := map[string]interface{}{
+		bundle.BundleConfigWatchNamespaceKey: watchNamespace,
+	}
+	return h.BundleToHelmChartConverter.ToHelmChart(source.FromFS(bundleFS), ext.Spec.Namespace, bundleConfig)
 }
 
 func (h *Helm) renderClientOnlyRelease(ctx context.Context, ext *ocv1.ClusterExtension, chrt *chart.Chart, values chartutil.Values, post postrender.PostRenderer) (*release.Release, error) {
diff --git a/internal/operator-controller/applier/helm_test.go b/internal/operator-controller/applier/helm_test.go
index 89c94df88..1f2a3cfd2 100644
--- a/internal/operator-controller/applier/helm_test.go
+++ b/internal/operator-controller/applier/helm_test.go
@@ -3,6 +3,7 @@ package applier_test
 import (
 	"context"
 	"errors"
+	"fmt"
 	"io"
 	"os"
 	"testing"
@@ -16,6 +17,7 @@ import (
 	"helm.sh/helm/v3/pkg/storage/driver"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -26,6 +28,7 @@ import (
 	"github.com/operator-framework/operator-controller/internal/operator-controller/applier"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/authorization"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/features"
+	registryv1Bundle "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle/source"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/convert"
 )
@@ -557,8 +560,8 @@ func TestApply_InstallationWithSingleOwnNamespaceInstallSupportEnabled(t *testin
 			},
 		},
 		BundleToHelmChartConverter: &fakeBundleToHelmChartConverter{
-			fn: func(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error) {
-				require.Equal(t, expectedWatchNamespace, watchNamespace)
+			fn: func(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error) {
+				require.Equal(t, expectedWatchNamespace, config[registryv1Bundle.BundleConfigWatchNamespaceKey])
 				return nil, nil
 			},
 		},
@@ -567,8 +570,13 @@ func TestApply_InstallationWithSingleOwnNamespaceInstallSupportEnabled(t *testin
 	testExt := &ocv1.ClusterExtension{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testExt",
-			Annotations: map[string]string{
-				applier.AnnotationClusterExtensionWatchNamespace: expectedWatchNamespace,
+		},
+		Spec: ocv1.ClusterExtensionSpec{
+			Config: &ocv1.ClusterExtensionConfig{
+				ConfigType: ocv1.ClusterExtensionConfigTypeInline,
+				Inline: &apiextensionsv1.JSON{
+					Raw: []byte(fmt.Sprintf(`{"%s":"%s"}`, registryv1Bundle.BundleConfigWatchNamespaceKey, expectedWatchNamespace)),
+				},
 			},
 		},
 	}
@@ -590,8 +598,8 @@ func TestApply_RegistryV1ToChartConverterIntegration(t *testing.T) {
 			},
 		},
 		BundleToHelmChartConverter: &fakeBundleToHelmChartConverter{
-			fn: func(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error) {
-				require.Equal(t, expectedWatchNamespace, watchNamespace)
+			fn: func(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error) {
+				require.Equal(t, expectedWatchNamespace, config[registryv1Bundle.BundleConfigWatchNamespaceKey])
 				return nil, nil
 			},
 		},
@@ -610,7 +618,7 @@ func TestApply_RegistryV1ToChartConverterIntegration(t *testing.T) {
 			},
 		},
 		BundleToHelmChartConverter: &fakeBundleToHelmChartConverter{
-			fn: func(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error) {
+			fn: func(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error) {
 				return nil, errors.New("some error")
 			},
 		},
@@ -622,9 +630,9 @@ func TestApply_RegistryV1ToChartConverterIntegration(t *testing.T) {
 }
 
 type fakeBundleToHelmChartConverter struct {
-	fn func(source.BundleSource, string, string) (*chart.Chart, error)
+	fn func(source.BundleSource, string, map[string]interface{}) (*chart.Chart, error)
 }
 
-func (f fakeBundleToHelmChartConverter) ToHelmChart(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error) {
-	return f.fn(bundle, installNamespace, watchNamespace)
+func (f fakeBundleToHelmChartConverter) ToHelmChart(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error) {
+	return f.fn(bundle, installNamespace, config)
 }
diff --git a/internal/operator-controller/applier/watchnamespace.go b/internal/operator-controller/applier/watchnamespace.go
index 193f456b3..a89c95d29 100644
--- a/internal/operator-controller/applier/watchnamespace.go
+++ b/internal/operator-controller/applier/watchnamespace.go
@@ -1,9 +1,9 @@
 package applier
 
 import (
+	"encoding/json"
 	"fmt"
 
-	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/validation"
 
 	ocv1 "github.com/operator-framework/operator-controller/api/v1"
@@ -19,14 +19,28 @@ const (
 	// for registry+v1 bundles. This will go away once the ClusterExtension API is updated to include
 	// (opaque) runtime configuration.
 func GetWatchNamespace(ext *ocv1.ClusterExtension) (string, error) {
-	if features.OperatorControllerFeatureGate.Enabled(features.SingleOwnNamespaceInstallSupport) {
-		if ext != nil && ext.Annotations[AnnotationClusterExtensionWatchNamespace] != "" {
-			watchNamespace := ext.Annotations[AnnotationClusterExtensionWatchNamespace]
-			if errs := validation.IsDNS1123Subdomain(watchNamespace); len(errs) > 0 {
-				return "", fmt.Errorf("invalid watch namespace '%s': namespace must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character", watchNamespace)
-			}
-			return ext.Annotations[AnnotationClusterExtensionWatchNamespace], nil
+	if !features.OperatorControllerFeatureGate.Enabled(features.SingleOwnNamespaceInstallSupport) {
+		return "", nil
+	}
+
+	var watchNamespace string
+	if ext.Spec.Config != nil && ext.Spec.Config.Inline != nil {
+		cfg := struct {
+			WatchNamespace string `json:"watchNamespace"`
+		}{}
+		if err := json.Unmarshal(ext.Spec.Config.Inline.Raw, &cfg); err != nil {
+			return "", fmt.Errorf("invalid bundle configuration: %w", err)
 		}
+		watchNamespace = cfg.WatchNamespace
+	} else if _, ok := ext.Annotations[AnnotationClusterExtensionWatchNamespace]; ok {
+		watchNamespace = ext.Annotations[AnnotationClusterExtensionWatchNamespace]
+	} else {
+		return "", nil
 	}
-	return corev1.NamespaceAll, nil
+
+	if errs := validation.IsDNS1123Subdomain(watchNamespace); len(errs) > 0 {
+		return "", fmt.Errorf("invalid watch namespace '%s': namespace must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character", watchNamespace)
+	}
+
+	return watchNamespace, nil
 }
diff --git a/internal/operator-controller/applier/watchnamespace_test.go b/internal/operator-controller/applier/watchnamespace_test.go
index 90e018dc7..4745f3dc5 100644
--- a/internal/operator-controller/applier/watchnamespace_test.go
+++ b/internal/operator-controller/applier/watchnamespace_test.go
@@ -5,26 +5,31 @@ import (
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 
-	v1 "github.com/operator-framework/operator-controller/api/v1"
+	ocv1 "github.com/operator-framework/operator-controller/api/v1"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/applier"
 	"github.com/operator-framework/operator-controller/internal/operator-controller/features"
 )
 
 func TestGetWatchNamespacesWhenFeatureGateIsDisabled(t *testing.T) {
-	watchNamespace, err := applier.GetWatchNamespace(&v1.ClusterExtension{
+	watchNamespace, err := applier.GetWatchNamespace(&ocv1.ClusterExtension{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "extension",
-			Annotations: map[string]string{
-				"olm.operatorframework.io/watch-namespace": "watch-namespace",
+		},
+		Spec: ocv1.ClusterExtensionSpec{
+			Config: &ocv1.ClusterExtensionConfig{
+				ConfigType: ocv1.ClusterExtensionConfigTypeInline,
+				Inline: &apiextensionsv1.JSON{
+					Raw: []byte(`{"watchNamespace":"watch-namespace"}`),
+				},
 			},
 		},
-		Spec: v1.ClusterExtensionSpec{},
 	})
 	require.NoError(t, err)
-	t.Log("Check watchNamespace is '' even if the annotation is set")
+	t.Log("Check watchNamespace is '' even if the configuration is set")
 	require.Equal(t, corev1.NamespaceAll, watchNamespace)
 }
 
@@ -34,57 +39,140 @@ func TestGetWatchNamespace(t *testing.T) {
 	for _, tt := range []struct {
 		name        string
 		want        string
-		csv         *v1.ClusterExtension
+		csv         *ocv1.ClusterExtension
 		expectError bool
 	}{
 		{
-			name: "cluster extension does not have watch namespace annotation",
+			name: "cluster extension does not configure a watch namespace",
 			want: corev1.NamespaceAll,
-			csv: &v1.ClusterExtension{
+			csv: &ocv1.ClusterExtension{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:        "extension",
 					Annotations: nil,
 				},
-				Spec: v1.ClusterExtensionSpec{},
+				Spec: ocv1.ClusterExtensionSpec{},
+			},
+			expectError: false,
+		}, {
+			name: "cluster extension configures a watch namespace",
+			want: "watch-namespace",
+			csv: &ocv1.ClusterExtension{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "extension",
+				},
+				Spec: ocv1.ClusterExtensionSpec{
+					Config: &ocv1.ClusterExtensionConfig{
+						ConfigType: ocv1.ClusterExtensionConfigTypeInline,
+						Inline: &apiextensionsv1.JSON{
+							Raw: []byte(`{"watchNamespace":"watch-namespace"}`),
+						},
+					},
+				},
 			},
 			expectError: false,
 		}, {
-			name: "cluster extension has valid namespace annotation",
+			name: "cluster extension configures a watch namespace through annotation",
 			want: "watch-namespace",
-			csv: &v1.ClusterExtension{
+			csv: &ocv1.ClusterExtension{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "extension",
 					Annotations: map[string]string{
 						"olm.operatorframework.io/watch-namespace": "watch-namespace",
 					},
 				},
-				Spec: v1.ClusterExtensionSpec{},
 			},
 			expectError: false,
 		}, {
-			name: "cluster extension has invalid namespace annotation: multiple watch namespaces",
-			want: "",
-			csv: &v1.ClusterExtension{
+			name: "cluster extension configures a watch namespace through annotation with invalid ns",
+			csv: &ocv1.ClusterExtension{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "extension",
 					Annotations: map[string]string{
-						"olm.operatorframework.io/watch-namespace": "watch-namespace,watch-namespace2,watch-namespace3",
+						"olm.operatorframework.io/watch-namespace": "watch-namespace-",
 					},
 				},
-				Spec: v1.ClusterExtensionSpec{},
 			},
 			expectError: true,
 		}, {
-			name: "cluster extension has invalid namespace annotation: invalid name",
-			want: "",
-			csv: &v1.ClusterExtension{
+			name: "cluster extension configures a watch namespace through annotation with empty ns",
+			csv: &ocv1.ClusterExtension{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "extension",
 					Annotations: map[string]string{
-						"olm.operatorframework.io/watch-namespace": "watch-namespace-",
+						"olm.operatorframework.io/watch-namespace": "",
+					},
+				},
+			},
+			expectError: true,
+		}, {
+			name: "cluster extension configures a watch namespace through annotation and config (take config)",
+			want: "watch-namespace",
+			csv: &ocv1.ClusterExtension{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "extension",
+					Annotations: map[string]string{
+						"olm.operatorframework.io/watch-namespace": "dont-use-this-watch-namespace",
+					},
+				},
+				Spec: ocv1.ClusterExtensionSpec{
+					Config: &ocv1.ClusterExtensionConfig{
+						ConfigType: ocv1.ClusterExtensionConfigTypeInline,
+						Inline: &apiextensionsv1.JSON{
+							Raw: []byte(`{"watchNamespace":"watch-namespace"}`),
+						},
+					},
+				},
+			},
+			expectError: false,
+		}, {
+			name: "cluster extension configures an invalid watchNamespace: multiple watch namespaces",
+			want: "",
+			csv: &ocv1.ClusterExtension{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "extension",
+				},
+				Spec: ocv1.ClusterExtensionSpec{
+					Config: &ocv1.ClusterExtensionConfig{
+						ConfigType: ocv1.ClusterExtensionConfigTypeInline,
+						Inline: &apiextensionsv1.JSON{
+							Raw: []byte(`{"watchNamespace":"watch-namespace,watch-namespace2,watch-namespace3"}`),
+						},
+					},
+				},
+			},
+			expectError: true,
+		}, {
+			name: "cluster extension configures an invalid watchNamespace: 
invalid name", + want: "", + csv: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "extension", + }, + Spec: ocv1.ClusterExtensionSpec{ + Config: &ocv1.ClusterExtensionConfig{ + ConfigType: ocv1.ClusterExtensionConfigTypeInline, + Inline: &apiextensionsv1.JSON{ + Raw: []byte(`{"watchNamespace":"watch-namespace-"}`), + }, + }, + }, + }, + expectError: true, + }, { + name: "cluster extension configures an invalid watchNamespace: invalid json", + want: "", + csv: &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "extension", + }, + Spec: ocv1.ClusterExtensionSpec{ + Config: &ocv1.ClusterExtensionConfig{ + ConfigType: ocv1.ClusterExtensionConfigTypeInline, + Inline: &apiextensionsv1.JSON{ + Raw: []byte(`invalid json`), + }, }, }, - Spec: v1.ClusterExtensionSpec{}, }, expectError: true, }, diff --git a/internal/operator-controller/controllers/clusterextension_controller.go b/internal/operator-controller/controllers/clusterextension_controller.go index 24824bfd1..ce6f63c3a 100644 --- a/internal/operator-controller/controllers/clusterextension_controller.go +++ b/internal/operator-controller/controllers/clusterextension_controller.go @@ -22,7 +22,6 @@ import ( "fmt" "io/fs" "strings" - "time" "github.com/go-logr/logr" "helm.sh/helm/v3/pkg/release" @@ -156,15 +155,13 @@ func ensureAllConditionsWithReason(ext *ocv1.ClusterExtension, reason v1alpha1.C cond := apimeta.FindStatusCondition(ext.Status.Conditions, condType) if cond == nil { // Create a new condition with a valid reason and add it - newCond := metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: condType, Status: metav1.ConditionFalse, Reason: string(reason), Message: message, ObservedGeneration: ext.GetGeneration(), - LastTransitionTime: metav1.NewTime(time.Now()), - } - ext.Status.Conditions = append(ext.Status.Conditions, newCond) + }) } } } @@ -381,7 +378,7 @@ func SetDeprecationStatus(ext *ocv1.ClusterExtension, bundleName string, depreca if len(deprecationMessages) > 0 { status, reason, message = metav1.ConditionTrue, ocv1.ReasonDeprecated, strings.Join(deprecationMessages, ";") } - apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: ocv1.TypeDeprecated, Reason: reason, Status: status, @@ -403,7 +400,7 @@ func SetDeprecationStatus(ext *ocv1.ClusterExtension, bundleName string, depreca message = fmt.Sprintf("%s\n%s", message, entry.Message) } } - apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: conditionType, Reason: reason, Status: status, diff --git a/internal/operator-controller/controllers/common_controller.go b/internal/operator-controller/controllers/common_controller.go index 7cee10c10..9195a83f9 100644 --- a/internal/operator-controller/controllers/common_controller.go +++ b/internal/operator-controller/controllers/common_controller.go @@ -27,6 +27,30 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" ) +const ( + // maxConditionMessageLength set the max message length allowed by Kubernetes. + maxConditionMessageLength = 32768 + // truncationSuffix is the suffix added when a message is cut. + truncationSuffix = "\n\n... 
[message truncated]" +) + +// truncateMessage cuts long messages to fit Kubernetes condition limits +func truncateMessage(message string) string { + if len(message) <= maxConditionMessageLength { + return message + } + + maxContent := maxConditionMessageLength - len(truncationSuffix) + return message[:maxContent] + truncationSuffix +} + +// SetStatusCondition wraps apimeta.SetStatusCondition and ensures the message is always truncated +// This should be used throughout the codebase instead of apimeta.SetStatusCondition directly +func SetStatusCondition(conditions *[]metav1.Condition, condition metav1.Condition) { + condition.Message = truncateMessage(condition.Message) + apimeta.SetStatusCondition(conditions, condition) +} + // setInstalledStatusFromBundle sets the installed status based on the given installedBundle. func setInstalledStatusFromBundle(ext *ocv1.ClusterExtension, installedBundle *InstalledBundle) { // Nothing is installed @@ -45,7 +69,7 @@ func setInstalledStatusFromBundle(ext *ocv1.ClusterExtension, installedBundle *I // setInstalledStatusConditionSuccess sets the installed status condition to success. func setInstalledStatusConditionSuccess(ext *ocv1.ClusterExtension, message string) { - apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: ocv1.TypeInstalled, Status: metav1.ConditionTrue, Reason: ocv1.ReasonSucceeded, @@ -56,7 +80,7 @@ func setInstalledStatusConditionSuccess(ext *ocv1.ClusterExtension, message stri // setInstalledStatusConditionFailed sets the installed status condition to failed. func setInstalledStatusConditionFailed(ext *ocv1.ClusterExtension, message string) { - apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: ocv1.TypeInstalled, Status: metav1.ConditionFalse, Reason: ocv1.ReasonFailed, @@ -67,7 +91,7 @@ func setInstalledStatusConditionFailed(ext *ocv1.ClusterExtension, message strin // setInstalledStatusConditionUnknown sets the installed status condition to unknown. 
func setInstalledStatusConditionUnknown(ext *ocv1.ClusterExtension, message string) { - apimeta.SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ + SetStatusCondition(&ext.Status.Conditions, metav1.Condition{ Type: ocv1.TypeInstalled, Status: metav1.ConditionUnknown, Reason: ocv1.ReasonFailed, @@ -99,5 +123,5 @@ func setStatusProgressing(ext *ocv1.ClusterExtension, err error) { progressingCond.Reason = ocv1.ReasonBlocked } - apimeta.SetStatusCondition(&ext.Status.Conditions, progressingCond) + SetStatusCondition(&ext.Status.Conditions, progressingCond) } diff --git a/internal/operator-controller/controllers/common_controller_test.go b/internal/operator-controller/controllers/common_controller_test.go index 7b644172d..057a2c9dc 100644 --- a/internal/operator-controller/controllers/common_controller_test.go +++ b/internal/operator-controller/controllers/common_controller_test.go @@ -2,6 +2,8 @@ package controllers import ( "errors" + "fmt" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -64,3 +66,177 @@ func TestSetStatusProgressing(t *testing.T) { }) } } + +func TestTruncateMessage(t *testing.T) { + tests := []struct { + name string + message string + expected string + }{ + { + name: "short message unchanged", + message: "This is a short message", + expected: "This is a short message", + }, + { + name: "empty message unchanged", + message: "", + expected: "", + }, + { + name: "exact max length message unchanged", + message: strings.Repeat("a", maxConditionMessageLength), + expected: strings.Repeat("a", maxConditionMessageLength), + }, + { + name: "message just over limit gets truncated", + message: strings.Repeat("a", maxConditionMessageLength+1), + expected: strings.Repeat("a", maxConditionMessageLength-len(truncationSuffix)) + truncationSuffix, + }, + { + name: "very long message gets truncated", + message: strings.Repeat("word ", 10000) + "finalword", + expected: strings.Repeat("word ", 10000)[:maxConditionMessageLength-len(truncationSuffix)] + truncationSuffix, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := truncateMessage(tc.message) + require.Equal(t, tc.expected, result) + + // Verify the result is within the limit + require.LessOrEqual(t, len(result), maxConditionMessageLength, + "truncated message should not exceed max length") + + // If the original message was over the limit, verify truncation occurred + if len(tc.message) > maxConditionMessageLength { + require.Contains(t, result, truncationSuffix, + "long messages should contain truncation suffix") + require.Less(t, len(result), len(tc.message), + "truncated message should be shorter than original") + } + }) + } +} + +func TestSetStatusProgressingWithLongMessage(t *testing.T) { + // Simulate a real ClusterExtension CRD upgrade safety check failure with many validation errors + longError := fmt.Sprintf("validating CRD upgrade safety for ClusterExtension 'my-operator': %s", + strings.Repeat("CRD \"myresources.example.com\" v1beta1->v1: field .spec.replicas changed from optional to required, field .spec.config.timeout type changed from string to integer, field .status.conditions[].observedGeneration removed\n", 500)) + + ext := &ocv1.ClusterExtension{ObjectMeta: metav1.ObjectMeta{Name: "my-operator"}} + err := errors.New(longError) + setStatusProgressing(ext, err) + + cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeProgressing) + require.NotNil(t, cond) + require.LessOrEqual(t, len(cond.Message), maxConditionMessageLength) + require.Contains(t, 
cond.Message, truncationSuffix) + require.Contains(t, cond.Message, "validating CRD upgrade safety") +} + +func TestClusterExtensionDeprecationMessageTruncation(t *testing.T) { + // Test truncation for ClusterExtension deprecation warnings with many deprecated APIs + ext := &ocv1.ClusterExtension{ObjectMeta: metav1.ObjectMeta{Name: "legacy-operator"}} + + // Simulate many deprecation warnings that would overflow the message limit + deprecationMessages := []string{} + for i := 0; i < 1000; i++ { + deprecationMessages = append(deprecationMessages, fmt.Sprintf("API version 'v1beta1' of resource 'customresources%d.example.com' is deprecated, use 'v1' instead", i)) + } + + longDeprecationMsg := strings.Join(deprecationMessages, "; ") + setInstalledStatusConditionUnknown(ext, longDeprecationMsg) + + cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeInstalled) + require.NotNil(t, cond) + require.LessOrEqual(t, len(cond.Message), maxConditionMessageLength) + require.Contains(t, cond.Message, truncationSuffix, "deprecation messages should be truncated when too long") + require.Contains(t, cond.Message, "API version", "should preserve important deprecation context") +} + +func TestClusterExtensionInstallationFailureTruncation(t *testing.T) { + // Test truncation for ClusterExtension installation failures with many bundle validation errors + installError := "failed to install ClusterExtension 'argocd-operator': bundle validation errors: " + + strings.Repeat("resource 'deployments/argocd-server' missing required label 'app.kubernetes.io/name', resource 'services/argocd-server-metrics' has invalid port configuration, resource 'configmaps/argocd-cm' contains invalid YAML in data field 'application.yaml'\n", 400) + + ext := &ocv1.ClusterExtension{ObjectMeta: metav1.ObjectMeta{Name: "argocd-operator"}} + setInstalledStatusConditionFailed(ext, installError) + + cond := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeInstalled) + require.NotNil(t, cond) + + // Verify message was truncated due to length + require.LessOrEqual(t, len(cond.Message), maxConditionMessageLength) + require.Contains(t, cond.Message, truncationSuffix, "installation failure messages should be truncated when too long") + require.Contains(t, cond.Message, "failed to install ClusterExtension", "should preserve important context") + require.Equal(t, metav1.ConditionFalse, cond.Status) + require.Equal(t, ocv1.ReasonFailed, cond.Reason) + + // Verify original message was actually longer than the limit + require.Greater(t, len(installError), maxConditionMessageLength, "test should use a message that exceeds the limit") +} + +func TestSetStatusConditionWrapper(t *testing.T) { + tests := []struct { + name string + message string + expectedTruncated bool + }{ + { + name: "short message not truncated", + message: "This is a short message", + expectedTruncated: false, + }, + { + name: "long message gets truncated", + message: strings.Repeat("This is a very long message. 
", 2000), + expectedTruncated: true, + }, + { + name: "message at exact limit not truncated", + message: strings.Repeat("a", maxConditionMessageLength), + expectedTruncated: false, + }, + { + name: "message over limit gets truncated", + message: strings.Repeat("a", maxConditionMessageLength+1), + expectedTruncated: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var conditions []metav1.Condition + + // Use our wrapper function + SetStatusCondition(&conditions, metav1.Condition{ + Type: "TestCondition", + Status: metav1.ConditionTrue, + Reason: "Testing", + Message: tc.message, + }) + + require.Len(t, conditions, 1, "should have exactly one condition") + cond := conditions[0] + + // Verify message is within limits + require.LessOrEqual(t, len(cond.Message), maxConditionMessageLength, + "condition message should not exceed max length") + + // Check if truncation occurred as expected + if tc.expectedTruncated { + require.Contains(t, cond.Message, truncationSuffix, + "long messages should contain truncation suffix") + require.Less(t, len(cond.Message), len(tc.message), + "truncated message should be shorter than original") + } else { + require.Equal(t, tc.message, cond.Message, + "short messages should remain unchanged") + require.NotContains(t, cond.Message, truncationSuffix, + "short messages should not contain truncation suffix") + } + }) + } +} diff --git a/internal/operator-controller/rukpak/bundle/registryv1.go b/internal/operator-controller/rukpak/bundle/registryv1.go index bc757e63d..cffc374e9 100644 --- a/internal/operator-controller/rukpak/bundle/registryv1.go +++ b/internal/operator-controller/rukpak/bundle/registryv1.go @@ -7,6 +7,10 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" ) +const ( + BundleConfigWatchNamespaceKey = "watchNamespace" +) + type RegistryV1 struct { PackageName string CSV v1alpha1.ClusterServiceVersion diff --git a/internal/operator-controller/rukpak/convert/helm.go b/internal/operator-controller/rukpak/convert/helm.go index 786601fdf..4a354b569 100644 --- a/internal/operator-controller/rukpak/convert/helm.go +++ b/internal/operator-controller/rukpak/convert/helm.go @@ -7,6 +7,7 @@ import ( "helm.sh/helm/v3/pkg/chart" + bundlepkg "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle/source" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render" ) @@ -17,12 +18,21 @@ type BundleToHelmChartConverter struct { IsWebhookSupportEnabled bool } -func (r *BundleToHelmChartConverter) ToHelmChart(bundle source.BundleSource, installNamespace string, watchNamespace string) (*chart.Chart, error) { +func (r *BundleToHelmChartConverter) ToHelmChart(bundle source.BundleSource, installNamespace string, config map[string]interface{}) (*chart.Chart, error) { rv1, err := bundle.GetBundle() if err != nil { return nil, err } + opts := []render.Option{ + render.WithCertificateProvider(r.CertificateProvider), + } + if config != nil { + if watchNs, ok := config[bundlepkg.BundleConfigWatchNamespaceKey].(string); ok { + opts = append(opts, render.WithTargetNamespaces(watchNs)) + } + } + if len(rv1.CSV.Spec.APIServiceDefinitions.Owned) > 0 { return nil, fmt.Errorf("unsupported bundle: apiServiceDefintions are not supported") } @@ -39,11 +49,7 @@ func (r *BundleToHelmChartConverter) ToHelmChart(bundle source.BundleSource, ins return nil, 
fmt.Errorf("unsupported bundle: webhookDefinitions are not supported") } - objs, err := r.BundleRenderer.Render( - rv1, installNamespace, - render.WithTargetNamespaces(watchNamespace), - render.WithCertificateProvider(r.CertificateProvider), - ) + objs, err := r.BundleRenderer.Render(rv1, installNamespace, opts...) if err != nil { return nil, fmt.Errorf("error rendering bundle: %w", err) diff --git a/internal/operator-controller/rukpak/convert/helm_test.go b/internal/operator-controller/rukpak/convert/helm_test.go index fdfa9812b..0151f7305 100644 --- a/internal/operator-controller/rukpak/convert/helm_test.go +++ b/internal/operator-controller/rukpak/convert/helm_test.go @@ -24,7 +24,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_ReturnsBundleSourceFailures(t * var failingBundleSource FakeBundleSource = func() (bundle.RegistryV1, error) { return bundle.RegistryV1{}, errors.New("some error") } - _, err := converter.ToHelmChart(failingBundleSource, "install-namespace", "watch-namespace") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "watch-namespace", + } + _, err := converter.ToHelmChart(failingBundleSource, "install-namespace", config) require.Error(t, err) require.Contains(t, err.Error(), "some error") } @@ -46,7 +49,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_ReturnsBundleRendererFailures(t }, ) - _, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + _, err := converter.ToHelmChart(b, "install-namespace", config) require.Error(t, err) require.Contains(t, err.Error(), "some error") } @@ -60,7 +66,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_NoAPIServiceDefinitions(t *test }, ) - _, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + _, err := converter.ToHelmChart(b, "install-namespace", config) require.Error(t, err) require.Contains(t, err.Error(), "unsupported bundle: apiServiceDefintions are not supported") } @@ -76,7 +85,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_NoWebhooksWithoutCertProvider(t }, ) - _, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + _, err := converter.ToHelmChart(b, "install-namespace", config) require.Error(t, err) require.Contains(t, err.Error(), "webhookDefinitions are not supported") } @@ -92,7 +104,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_WebhooksSupportDisabled(t *test }, ) - _, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + _, err := converter.ToHelmChart(b, "install-namespace", config) require.Error(t, err) require.Contains(t, err.Error(), "webhookDefinitions are not supported") } @@ -112,7 +127,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_WebhooksWithCertProvider(t *tes }, ) - _, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + _, err := converter.ToHelmChart(b, "install-namespace", config) require.NoError(t, err) } @@ -142,7 +160,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_BundleRendererIntegration(t *te }, ) - _, err := converter.ToHelmChart(b, expectedInstallNamespace, expectedWatchNamespace) + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: expectedWatchNamespace, 
+ } + _, err := converter.ToHelmChart(b, expectedInstallNamespace, config) require.NoError(t, err) } @@ -181,7 +202,10 @@ func Test_BundleToHelmChartConverter_ToHelmChart_Success(t *testing.T) { }, ) - chart, err := converter.ToHelmChart(b, "install-namespace", "") + config := map[string]interface{}{ + bundle.BundleConfigWatchNamespaceKey: "", + } + chart, err := converter.ToHelmChart(b, "install-namespace", config) require.NoError(t, err) require.NotNil(t, chart) require.NotNil(t, chart.Metadata) diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go index fadc85873..46c5e674d 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety.go @@ -15,6 +15,7 @@ import ( "sigs.k8s.io/crdify/pkg/config" "sigs.k8s.io/crdify/pkg/runner" "sigs.k8s.io/crdify/pkg/validations" + "sigs.k8s.io/crdify/pkg/validations/property" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util" ) @@ -130,6 +131,13 @@ func defaultConfig() *config.Config { Name: "description", Enforcement: config.EnforcementPolicyNone, }, + { + Name: "enum", + Enforcement: config.EnforcementPolicyError, + Configuration: map[string]interface{}{ + "additionPolicy": property.AdditionPolicyAllow, + }, + }, }, } } diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go index 73db9673b..d1bd53905 100644 --- a/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/crdupgradesafety_test.go @@ -38,7 +38,7 @@ func newMockPreflight(crd *apiextensionsv1.CustomResourceDefinition, err error) }, preflightOpts...) } -const crdFolder string = "../../../../../testdata/manifests" +const crdFolder string = "testdata/manifests" func getCrdFromManifestFile(t *testing.T, oldCrdFile string) *apiextensionsv1.CustomResourceDefinition { if oldCrdFile == "" { @@ -66,6 +66,14 @@ func getManifestString(t *testing.T, crdFile string) string { return string(buff) } +func wantErrorMsgs(wantMsgs []string) require.ErrorAssertionFunc { + return func(t require.TestingT, haveErr error, _ ...interface{}) { + for _, wantMsg := range wantMsgs { + require.ErrorContains(t, haveErr, wantMsg) + } + } +} + // TestInstall exists only for completeness as Install() is currently a no-op. It can be used as // a template for real tests in the future if the func is implemented. 
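Reviewer note: the refactor above replaces the wantErrMsgs []string field with a require.ErrorAssertionFunc, so a table entry can either assert several substrings through the wantErrorMsgs helper or supply a fully custom closure (as the webhook-conversion case further down does). A brief sketch, assuming the helper as defined above:

    requireErr := wantErrorMsgs([]string{"scope:", "enum:"})
    // Passes: the error contains every expected substring.
    requireErr(t, errors.New("scope: changed\nenum: allowed enum values removed"))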
func TestInstall(t *testing.T) { @@ -73,7 +81,7 @@ func TestInstall(t *testing.T) { name string oldCrdPath string release *release.Release - wantErrMsgs []string + requireErr require.ErrorAssertionFunc wantCrdGetErr error }{ { @@ -91,7 +99,7 @@ func TestInstall(t *testing.T) { Name: "test-release", Manifest: "abcd", }, - wantErrMsgs: []string{"json: cannot unmarshal string into Go value of type unstructured.detector"}, + requireErr: wantErrorMsgs([]string{"json: cannot unmarshal string into Go value of type unstructured.detector"}), }, { name: "release with no CRD objects", @@ -107,7 +115,7 @@ func TestInstall(t *testing.T) { Manifest: getManifestString(t, "crd-valid-upgrade.json"), }, wantCrdGetErr: fmt.Errorf("error!"), - wantErrMsgs: []string{"error!"}, + requireErr: wantErrorMsgs([]string{"error!"}), }, { name: "fail to get old crd, not found error", @@ -123,7 +131,7 @@ func TestInstall(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-invalid"), }, - wantErrMsgs: []string{"json: cannot unmarshal"}, + requireErr: wantErrorMsgs([]string{"json: cannot unmarshal"}), }, { name: "valid upgrade", @@ -142,7 +150,7 @@ func TestInstall(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-invalid-upgrade.json"), }, - wantErrMsgs: []string{ + requireErr: wantErrorMsgs([]string{ `scope:`, `storedVersionRemoval:`, `enum:`, @@ -156,7 +164,7 @@ func TestInstall(t *testing.T) { `minLength:`, `minProperties:`, `default:`, - }, + }), }, { name: "new crd validation failure for existing field removal", @@ -167,9 +175,9 @@ func TestInstall(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-field-removed.json"), }, - wantErrMsgs: []string{ + requireErr: wantErrorMsgs([]string{ `existingFieldRemoval:`, - }, + }), }, { name: "new crd validation should not fail on description changes", @@ -187,10 +195,8 @@ func TestInstall(t *testing.T) { t.Run(tc.name, func(t *testing.T) { preflight := newMockPreflight(getCrdFromManifestFile(t, tc.oldCrdPath), tc.wantCrdGetErr) err := preflight.Install(context.Background(), tc.release) - if len(tc.wantErrMsgs) != 0 { - for _, expectedErrMsg := range tc.wantErrMsgs { - require.ErrorContains(t, err, expectedErrMsg) - } + if tc.requireErr != nil { + tc.requireErr(t, err) } else { require.NoError(t, err) } @@ -203,7 +209,7 @@ func TestUpgrade(t *testing.T) { name string oldCrdPath string release *release.Release - wantErrMsgs []string + requireErr require.ErrorAssertionFunc wantCrdGetErr error }{ { @@ -221,7 +227,7 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: "abcd", }, - wantErrMsgs: []string{"json: cannot unmarshal string into Go value of type unstructured.detector"}, + requireErr: wantErrorMsgs([]string{"json: cannot unmarshal string into Go value of type unstructured.detector"}), }, { name: "release with no CRD objects", @@ -237,7 +243,7 @@ func TestUpgrade(t *testing.T) { Manifest: getManifestString(t, "crd-valid-upgrade.json"), }, wantCrdGetErr: fmt.Errorf("error!"), - wantErrMsgs: []string{"error!"}, + requireErr: wantErrorMsgs([]string{"error!"}), }, { name: "fail to get old crd, not found error", @@ -253,7 +259,7 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-invalid"), }, - wantErrMsgs: []string{"json: cannot unmarshal"}, + requireErr: wantErrorMsgs([]string{"json: cannot unmarshal"}), }, { name: "valid upgrade", @@ -272,7 +278,7 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, 
"crd-invalid-upgrade.json"), }, - wantErrMsgs: []string{ + requireErr: wantErrorMsgs([]string{ `scope:`, `storedVersionRemoval:`, `enum:`, @@ -286,7 +292,7 @@ func TestUpgrade(t *testing.T) { `minLength:`, `minProperties:`, `default:`, - }, + }), }, { name: "new crd validation failure for existing field removal", @@ -297,9 +303,9 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-field-removed.json"), }, - wantErrMsgs: []string{ + requireErr: wantErrorMsgs([]string{ `existingFieldRemoval:`, - }, + }), }, { name: "webhook conversion strategy exists", @@ -316,9 +322,9 @@ func TestUpgrade(t *testing.T) { Name: "test-release", Manifest: getManifestString(t, "crd-conversion-no-webhook.json"), }, - wantErrMsgs: []string{ - `validating upgrade for CRD "crontabs.stable.example.com": v1 <-> v2: ^.spec.foobarbaz: enum: allowed enum values removed`, - }, + requireErr: wantErrorMsgs([]string{ + `validating upgrade for CRD "crontabs.stable.example.com": v1 -> v2: ^.spec.foobarbaz: enum: allowed enum values removed`, + }), }, { name: "new crd validation should not fail on description changes", @@ -330,16 +336,43 @@ func TestUpgrade(t *testing.T) { Manifest: getManifestString(t, "crd-description-changed.json"), }, }, + { + name: "success when old crd and new crd contain the exact same validation issues", + oldCrdPath: "crd-conversion-no-webhook.json", + release: &release.Release{ + Name: "test-release", + Manifest: getManifestString(t, "crd-conversion-no-webhook.json"), + }, + }, + { + name: "failure when old crd and new crd contain the exact same validation issues, but new crd introduces another validation issue", + oldCrdPath: "crd-conversion-no-webhook.json", + release: &release.Release{ + Name: "test-release", + Manifest: getManifestString(t, "crd-conversion-no-webhook-extra-issue.json"), + }, + requireErr: func(t require.TestingT, err error, _ ...interface{}) { + require.ErrorContains(t, err, + `validating upgrade for CRD "crontabs.stable.example.com":`, + ) + // The newly introduced issue is reported + require.Contains(t, err.Error(), + `v1 -> v2: ^.spec.extraField: type: type changed : "boolean" -> "string"`, + ) + // The existing issue is not reported + require.NotContains(t, err.Error(), + `v1 -> v2: ^.spec.foobarbaz: enum: allowed enum values removed`, + ) + }, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { preflight := newMockPreflight(getCrdFromManifestFile(t, tc.oldCrdPath), tc.wantCrdGetErr) err := preflight.Upgrade(context.Background(), tc.release) - if len(tc.wantErrMsgs) != 0 { - for _, expectedErrMsg := range tc.wantErrMsgs { - require.ErrorContains(t, err, expectedErrMsg) - } + if tc.requireErr != nil { + tc.requireErr(t, err) } else { require.NoError(t, err) } diff --git a/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-no-webhook-extra-issue.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-no-webhook-extra-issue.json new file mode 100644 index 000000000..0bfd13384 --- /dev/null +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-no-webhook-extra-issue.json @@ -0,0 +1,76 @@ +{ + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": { + "name": "crontabs.stable.example.com" + }, + "spec": { + "group": "stable.example.com", + "versions": [ + { + "name": "v2", + "served": true, + "storage": false, + "schema": { + 
"openAPIV3Schema": { + "type": "object", + "properties": { + "spec": { + "type": "object", + "properties": { + "foobarbaz": { + "type":"string", + "enum":[ + "bark", + "woof" + ] + }, + "extraField": { + "type":"string" + } + } + } + } + } + } + }, + { + "name": "v1", + "served": true, + "storage": false, + "schema": { + "openAPIV3Schema": { + "type": "object", + "properties": { + "spec": { + "type": "object", + "properties": { + "foobarbaz": { + "type":"string", + "enum":[ + "foo", + "bar", + "baz" + ] + }, + "extraField": { + "type":"boolean" + } + } + } + } + } + } + } + ], + "scope": "Cluster", + "names": { + "plural": "crontabs", + "singular": "crontab", + "kind": "CronTab", + "shortNames": [ + "ct" + ] + } + } +} diff --git a/testdata/manifests/crd-conversion-no-webhook.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-no-webhook.json similarity index 100% rename from testdata/manifests/crd-conversion-no-webhook.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-no-webhook.json diff --git a/testdata/manifests/crd-conversion-webhook-old.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-webhook-old.json similarity index 100% rename from testdata/manifests/crd-conversion-webhook-old.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-webhook-old.json diff --git a/testdata/manifests/crd-conversion-webhook.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-webhook.json similarity index 100% rename from testdata/manifests/crd-conversion-webhook.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-conversion-webhook.json diff --git a/testdata/manifests/crd-description-changed.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-description-changed.json similarity index 94% rename from testdata/manifests/crd-description-changed.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-description-changed.json index ae30459e3..0e7f9a600 100644 --- a/testdata/manifests/crd-description-changed.json +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-description-changed.json @@ -23,7 +23,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" @@ -70,7 +71,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" diff --git a/testdata/manifests/crd-field-removed.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-field-removed.json similarity index 96% rename from testdata/manifests/crd-field-removed.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-field-removed.json index 86ba06e40..650b13fd4 100644 --- a/testdata/manifests/crd-field-removed.json +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-field-removed.json @@ -22,7 +22,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" @@ -66,7 +67,8 @@ "type": "object", "properties": { "enum": { - "type":"integer" + "type": 
"string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" diff --git a/testdata/manifests/crd-invalid b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-invalid similarity index 100% rename from testdata/manifests/crd-invalid rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-invalid diff --git a/testdata/manifests/crd-invalid-upgrade.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-invalid-upgrade.json similarity index 92% rename from testdata/manifests/crd-invalid-upgrade.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-invalid-upgrade.json index 4131a68fb..3c95ccb25 100644 --- a/testdata/manifests/crd-invalid-upgrade.json +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-invalid-upgrade.json @@ -24,11 +24,8 @@ "type":"integer" }, "enum": { - "type":"integer", - "enum":[ - 1, - 2 - ] + "type": "string", + "enum": ["a", "b"] }, "minMaxValue": { "type":"integer", diff --git a/testdata/manifests/crd-valid-upgrade.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-valid-upgrade.json similarity index 93% rename from testdata/manifests/crd-valid-upgrade.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-valid-upgrade.json index 52380dc92..cbc2e3ec1 100644 --- a/testdata/manifests/crd-valid-upgrade.json +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/crd-valid-upgrade.json @@ -22,7 +22,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c", "adding-enum-is-allowed"] }, "minMaxValue": { "type":"integer" @@ -69,7 +70,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c", "adding-enum-is-allowed"] }, "minMaxValue": { "type":"integer" @@ -116,7 +118,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c", "adding-enum-is-allowed"] }, "minMaxValue": { "type":"integer" diff --git a/testdata/manifests/no-crds.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/no-crds.json similarity index 100% rename from testdata/manifests/no-crds.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/no-crds.json diff --git a/testdata/manifests/old-crd.json b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/old-crd.json similarity index 94% rename from testdata/manifests/old-crd.json rename to internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/old-crd.json index 1f3ff5a4b..5a8c55b32 100644 --- a/testdata/manifests/old-crd.json +++ b/internal/operator-controller/rukpak/preflights/crdupgradesafety/testdata/manifests/old-crd.json @@ -23,7 +23,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" @@ -70,7 +71,8 @@ "type":"integer" }, "enum": { - "type":"integer" + "type": "string", + "enum": ["a", "b", "c"] }, "minMaxValue": { "type":"integer" diff --git a/internal/operator-controller/rukpak/render/render.go b/internal/operator-controller/rukpak/render/render.go index 70063f1d4..384b16796 100644 --- a/internal/operator-controller/rukpak/render/render.go +++ 
b/internal/operator-controller/rukpak/render/render.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -74,9 +74,6 @@ func (o *Options) apply(opts ...Option) *Options {
 func (o *Options) validate(rv1 *bundle.RegistryV1) (*Options, []error) {
 	var errs []error
 
-	if len(o.TargetNamespaces) == 0 {
-		errs = append(errs, errors.New("at least one target namespace must be specified"))
-	}
 	if o.UniqueNameGenerator == nil {
 		errs = append(errs, errors.New("unique name generator must be specified"))
 	}
@@ -88,9 +85,14 @@ type Option func(*Options)
 
+// WithTargetNamespaces sets the target namespaces to be used when rendering the bundle.
+// The value will only be used if len(namespaces) > 0. Otherwise, the default value for the bundle,
+// derived from its install mode support, will be used (if such a value can be defined).
 func WithTargetNamespaces(namespaces ...string) Option {
 	return func(o *Options) {
-		o.TargetNamespaces = namespaces
+		if len(namespaces) > 0 {
+			o.TargetNamespaces = namespaces
+		}
 	}
 }
 
@@ -121,7 +123,7 @@ func (r BundleRenderer) Render(rv1 bundle.RegistryV1, installNamespace string, o
 	genOpts, errs := (&Options{
 		// default options
 		InstallNamespace:    installNamespace,
-		TargetNamespaces:    []string{metav1.NamespaceAll},
+		TargetNamespaces:    defaultTargetNamespacesForBundle(&rv1, installNamespace),
 		UniqueNameGenerator: DefaultUniqueNameGenerator,
 		CertificateProvider: nil,
 	}).apply(opts...).validate(&rv1)
@@ -147,31 +149,69 @@ func DefaultUniqueNameGenerator(base string, o interface{}) (string, error) {
 }
 
 func validateTargetNamespaces(rv1 *bundle.RegistryV1, installNamespace string, targetNamespaces []string) error {
-	supportedInstallModes := sets.New[string]()
-	for _, im := range rv1.CSV.Spec.InstallModes {
-		if im.Supported {
-			supportedInstallModes.Insert(string(im.Type))
-		}
-	}
+	supportedInstallModes := supportedBundleInstallModes(rv1)
 
 	set := sets.New[string](targetNamespaces...)
 	switch {
-	case set.Len() == 0 || (set.Len() == 1 && set.Has("")):
-		if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeAllNamespaces)) {
+	case set.Len() == 0:
+		// Note: this function normally expects targetNamespaces to contain at least one value, because the
+		// renderer seeds a bundle-derived default when the caller does not specify one (and the
+		// WithTargetNamespaces option is a no-op for an empty list). A default cannot be derived for bundles
+		// that support neither AllNamespaces nor OwnNamespace, i.e. SingleNamespace-only or MultiNamespace-only
+		// bundles. The if statement here gives the SingleNamespace-only case a friendlier error message than
+		// the generic "at least one target namespace must be specified" that the MultiNamespace-only case produces.
+ // If AllNamespaces mode is supported, the default will be [""] -> watch all namespaces + // If only OwnNamespace is supported, the default will be [install-namespace] -> only watch the install/own namespace + if supportedInstallModes.Len() == 1 && supportedInstallModes.Has(v1alpha1.InstallModeTypeSingleNamespace) { + return errors.New("exactly one target namespace must be specified") + } + return errors.New("at least one target namespace must be specified") + case set.Len() == 1 && set.Has(""): + if supportedInstallModes.Has(v1alpha1.InstallModeTypeAllNamespaces) { return nil } return fmt.Errorf("supported install modes %v do not support targeting all namespaces", sets.List(supportedInstallModes)) case set.Len() == 1 && !set.Has(""): - if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeSingleNamespace)) { + if targetNamespaces[0] == installNamespace { + if !supportedInstallModes.Has(v1alpha1.InstallModeTypeOwnNamespace) { + return fmt.Errorf("supported install modes %v do not support targeting own namespace", sets.List(supportedInstallModes)) + } return nil } - if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeOwnNamespace)) && targetNamespaces[0] == installNamespace { + if supportedInstallModes.Has(v1alpha1.InstallModeTypeSingleNamespace) { return nil } default: - if supportedInstallModes.Has(string(v1alpha1.InstallModeTypeMultiNamespace)) && !set.Has("") { + if !supportedInstallModes.Has(v1alpha1.InstallModeTypeOwnNamespace) && set.Has(installNamespace) { + return fmt.Errorf("supported install modes %v do not support targeting own namespace", sets.List(supportedInstallModes)) + } + if supportedInstallModes.Has(v1alpha1.InstallModeTypeMultiNamespace) && !set.Has("") { return nil } } - return fmt.Errorf("supported install modes %v do not support target namespaces %v", sets.List[string](supportedInstallModes), targetNamespaces) + return fmt.Errorf("supported install modes %v do not support target namespaces %v", sets.List[v1alpha1.InstallModeType](supportedInstallModes), targetNamespaces) +} + +func defaultTargetNamespacesForBundle(rv1 *bundle.RegistryV1, installNamespace string) []string { + supportedInstallModes := supportedBundleInstallModes(rv1) + + if supportedInstallModes.Has(v1alpha1.InstallModeTypeAllNamespaces) { + return []string{corev1.NamespaceAll} + } + + if supportedInstallModes.Has(v1alpha1.InstallModeTypeOwnNamespace) { + return []string{installNamespace} + } + + return nil +} + +func supportedBundleInstallModes(rv1 *bundle.RegistryV1) sets.Set[v1alpha1.InstallModeType] { + supportedInstallModes := sets.New[v1alpha1.InstallModeType]() + for _, im := range rv1.CSV.Spec.InstallModes { + if im.Supported { + supportedInstallModes.Insert(im.Type) + } + } + return supportedInstallModes } diff --git a/internal/operator-controller/rukpak/render/render_test.go b/internal/operator-controller/rukpak/render/render_test.go index 0760c4fa1..0461ea3be 100644 --- a/internal/operator-controller/rukpak/render/render_test.go +++ b/internal/operator-controller/rukpak/render/render_test.go @@ -70,13 +70,12 @@ func Test_BundleRenderer_ValidatesRenderOptions(t *testing.T) { err error }{ { - name: "rejects empty targetNamespaces", + name: "accepts empty targetNamespaces (because it is ignored)", installNamespace: "install-namespace", csv: MakeCSV(WithInstallModeSupportFor(v1alpha1.InstallModeTypeAllNamespaces)), opts: []render.Option{ render.WithTargetNamespaces(), }, - err: errors.New("invalid option(s): at least one target namespace must be specified"), }, { name: "rejects 
nil unique name generator", installNamespace: "install-namespace", @@ -100,7 +99,7 @@ func Test_BundleRenderer_ValidatesRenderOptions(t *testing.T) { opts: []render.Option{ render.WithTargetNamespaces("install-namespace"), }, - err: errors.New("invalid option(s): invalid target namespaces [install-namespace]: supported install modes [AllNamespaces] do not support target namespaces [install-namespace]"), + err: errors.New("invalid option(s): invalid target namespaces [install-namespace]: supported install modes [AllNamespaces] do not support targeting own namespace"), }, { name: "rejects install out of own namespace if only OwnNamespace install mode is supported", installNamespace: "install-namespace", @@ -160,6 +159,14 @@ func Test_BundleRenderer_ValidatesRenderOptions(t *testing.T) { opts: []render.Option{ render.WithTargetNamespaces("n1", "n2", "n3"), }, + }, { + name: "reject multi namespace render if OwnNamespace install mode is not supported and target namespaces include install namespace", + installNamespace: "install-namespace", + csv: MakeCSV(WithInstallModeSupportFor(v1alpha1.InstallModeTypeMultiNamespace)), + opts: []render.Option{ + render.WithTargetNamespaces("n1", "n2", "n3", "install-namespace"), + }, + err: errors.New("invalid option(s): invalid target namespaces [n1 n2 n3 install-namespace]: supported install modes [MultiNamespace] do not support targeting own namespace"), }, } { t.Run(tc.name, func(t *testing.T) { diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index a91833bd7..36ef3661a 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -511,6 +511,40 @@ spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config contains optional configuration values applied during rendering of the + ClusterExtension's manifests. Values can be specified inline. + + config is optional. When not specified, the default configuration of the resolved bundle will be used. + properties: + configType: + description: |- + configType is a required reference to the type of configuration source. + + Allowed values are "Inline" + + When this field is set to "Inline", the cluster extension configuration is defined inline within the + ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the + ClusterExtension. + + inline must be set if configType is 'Inline'. + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden + otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) + : !has(self.inline)' install: description: |- install is an optional field used to configure the installation options diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 00dc14153..291f34cff 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -511,6 +511,40 @@ spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config contains optional configuration values applied during rendering of the + ClusterExtension's manifests. Values can be specified inline. + + config is optional. When not specified, the default configuration of the resolved bundle will be used. 
+ properties: + configType: + description: |- + configType is a required reference to the type of configuration source. + + Allowed values are "Inline" + + When this field is set to "Inline", the cluster extension configuration is defined inline within the + ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the + ClusterExtension. + + inline must be set if configType is 'Inline'. + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden + otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) + : !has(self.inline)' install: description: |- install is an optional field used to configure the installation options diff --git a/openshift/operator-controller/manifests-experimental/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml b/openshift/operator-controller/manifests-experimental/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml index 4c0c18fd1..4d67e06ee 100644 --- a/openshift/operator-controller/manifests-experimental/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml +++ b/openshift/operator-controller/manifests-experimental/01-customresourcedefinition-clusterextensions.olm.operatorframework.io.yml @@ -56,6 +56,38 @@ spec: spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config contains optional configuration values applied during rendering of the + ClusterExtension's manifests. Values can be specified inline. + + config is optional. When not specified, the default configuration of the resolved bundle will be used. + properties: + configType: + description: |- + configType is a required reference to the type of configuration source. + + Allowed values are "Inline" + + When this field is set to "Inline", the cluster extension configuration is defined inline within the + ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the + ClusterExtension. + + inline must be set if configType is 'Inline'. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) : !has(self.inline)' install: description: |- install is an optional field used to configure the installation options diff --git a/requirements.txt b/requirements.txt index a24ec7a9a..aca833e67 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ Babel==2.17.0 -beautifulsoup4==4.13.4 +beautifulsoup4==4.13.5 certifi==2025.8.3 charset-normalizer==3.4.3 click==8.1.8 @@ -8,18 +8,18 @@ cssselect==1.3.0 ghp-import==2.1.0 idna==3.10 Jinja2==3.1.6 -lxml==6.0.0 +lxml==6.0.1 Markdown==3.8.2 markdown2==2.5.4 MarkupSafe==3.0.2 mergedeep==1.3.4 mkdocs==1.6.1 -mkdocs-material==9.6.17 +mkdocs-material==9.6.18 mkdocs-material-extensions==1.3.1 packaging==25.0 paginate==0.5.7 pathspec==0.12.1 -platformdirs==4.3.8 +platformdirs==4.4.0 Pygments==2.19.2 pymdown-extensions==10.16.1 pyquery==2.0.1 @@ -27,9 +27,9 @@ python-dateutil==2.9.0.post0 PyYAML==6.0.2 pyyaml_env_tag==1.1 readtime==3.0.0 -regex==2025.7.34 +regex==2025.9.1 requests==2.32.5 six==1.17.0 -soupsieve==2.7 +soupsieve==2.8 urllib3==2.5.0 watchdog==6.0.0 diff --git a/test/experimental-e2e/experimental_e2e_test.go b/test/experimental-e2e/experimental_e2e_test.go index 39c16e97f..eba429b91 100644 --- a/test/experimental-e2e/experimental_e2e_test.go +++ b/test/experimental-e2e/experimental_e2e_test.go @@ -248,6 +248,147 @@ func TestWebhookSupport(t *testing.T) { }, res.Object["spec"]) } +func TestClusterExtensionConfigSupport(t *testing.T) { + t.Log("Test support for cluster extension config") + defer utils.CollectTestArtifacts(t, artifactName, c, cfg) + + t.Log("By creating install namespace, watch namespace and necessary rbac resources") + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator", + }, + } + require.NoError(t, c.Create(t.Context(), &namespace)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), &namespace)) + }) + + watchNamespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator-watch", + }, + } + require.NoError(t, c.Create(t.Context(), &watchNamespace)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), &watchNamespace)) + }) + + serviceAccount := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator-installer", + Namespace: namespace.GetName(), + }, + } + require.NoError(t, c.Create(t.Context(), &serviceAccount)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), &serviceAccount)) + }) + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator-installer", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + APIGroup: corev1.GroupName, + Name: serviceAccount.GetName(), + Namespace: serviceAccount.GetNamespace(), + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + } + require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) + }) + + t.Log("By creating the test-operator ClusterCatalog") + extensionCatalog := &ocv1.ClusterCatalog{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator-catalog", + }, + Spec: ocv1.ClusterCatalogSpec{ + Source: 
ocv1.CatalogSource{ + Type: ocv1.SourceTypeImage, + Image: &ocv1.ImageSource{ + Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), + PollIntervalMinutes: ptr.To(1), + }, + }, + }, + } + require.NoError(t, c.Create(t.Context(), extensionCatalog)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), extensionCatalog)) + }) + + t.Log("By waiting for the catalog to serve its metadata") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) + cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) + }, pollDuration, pollInterval) + + t.Log("By installing the test-operator ClusterExtension configured in SingleNamespace mode") + clusterExtension := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-operator-extension", + }, + Spec: ocv1.ClusterExtensionSpec{ + Source: ocv1.SourceConfig{ + SourceType: "Catalog", + Catalog: &ocv1.CatalogFilter{ + PackageName: "test", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, + }, + }, + }, + Namespace: namespace.GetName(), + ServiceAccount: ocv1.ServiceAccountReference{ + Name: serviceAccount.GetName(), + }, + Config: &ocv1.ClusterExtensionConfig{ + ConfigType: ocv1.ClusterExtensionConfigTypeInline, + Inline: &apiextensionsv1.JSON{ + Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, watchNamespace.GetName())), + }, + }, + }, + } + require.NoError(t, c.Create(t.Context(), clusterExtension)) + t.Cleanup(func() { + require.NoError(t, c.Delete(context.Background(), clusterExtension)) + }) + + t.Log("By waiting for test-operator extension to be installed successfully") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) + cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) + require.NotNil(ct, cond) + require.Equal(ct, metav1.ConditionTrue, cond.Status) + require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) + require.Contains(ct, cond.Message, "Installed bundle") + require.NotNil(ct, clusterExtension.Status.Install) + require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) + }, pollDuration, pollInterval) + + t.Log("By ensuring the test-operator deployment is correctly configured to watch the watch namespace") + require.EventuallyWithT(t, func(ct *assert.CollectT) { + deployment := &appsv1.Deployment{} + require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "test-operator"}, deployment)) + require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) + require.Equal(ct, watchNamespace.GetName(), deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) + }, pollDuration, pollInterval) +} + func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured { return &unstructured.Unstructured{ Object: map[string]interface{}{ diff --git a/testdata/images/bundles/test-operator/v1.0.0/manifests/testoperator.clusterserviceversion.yaml b/testdata/images/bundles/test-operator/v1.0.0/manifests/testoperator.clusterserviceversion.yaml index 3520f53db..54924492b 100644 --- 
a/testdata/images/bundles/test-operator/v1.0.0/manifests/testoperator.clusterserviceversion.yaml +++ b/testdata/images/bundles/test-operator/v1.0.0/manifests/testoperator.clusterserviceversion.yaml @@ -130,7 +130,7 @@ spec: installModes: - supported: false type: OwnNamespace - - supported: false + - supported: true type: SingleNamespace - supported: false type: MultiNamespace diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 5510e2e79..f494845b8 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 36 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 1 + VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go index 9f1322dcc..6102f9100 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go @@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) { return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms) } - if alg != "" && !containsContentEncryption(contentEncryption, enc) { + if enc != "" && !containsContentEncryption(contentEncryption, enc) { return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption) } return nil @@ -288,11 +288,20 @@ func ParseEncryptedCompact( keyAlgorithms []KeyAlgorithm, contentEncryption []ContentEncryption, ) (*JSONWebEncryption, error) { - // Five parts is four separators - if strings.Count(input, ".") != 4 { - return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") + var parts [5]string + var ok bool + + for i := range 4 { + parts[i], input, ok = strings.Cut(input, ".") + if !ok { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") + } + } + // Validate that the last part does not contain more dots + if strings.ContainsRune(input, '.') { + return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts") } - parts := strings.SplitN(input, ".", 5) + parts[4] = input rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) if err != nil { diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go index 9e57e93ba..9700f8906 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go @@ -239,7 +239,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { keyPub = key } } else { - return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) + return fmt.Errorf("go-jose/go-jose: unknown curve '%s'", raw.Crv) } default: return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go index be2b73308..c40bd3ec1 100644 --- a/vendor/github.com/go-jose/go-jose/v4/jws.go +++ b/vendor/github.com/go-jose/go-jose/v4/jws.go @@ -361,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo return obj, nil } +const tokenDelim = "." 
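Aside: `ParseEncryptedCompact` above and `parseSignedCompact` below now share the same shape: peel off the leading fields with `strings.Cut`, then reject any extra separator left in the remainder, instead of the earlier `strings.Count` pre-scan plus `SplitN`. A standalone sketch of that pattern under an assumed helper name (`splitExact` is not a go-jose function):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitExact returns exactly n dot-separated parts of input, failing on
// both too few and too many separators, in a single left-to-right pass.
func splitExact(input string, n int) ([]string, error) {
	parts := make([]string, n)
	for i := 0; i < n-1; i++ {
		var ok bool
		parts[i], input, ok = strings.Cut(input, ".")
		if !ok {
			return nil, errors.New("too few parts")
		}
	}
	// The remainder is the final part; any dot left here means too many parts.
	if strings.ContainsRune(input, '.') {
		return nil, errors.New("too many parts")
	}
	parts[n-1] = input
	return parts, nil
}

func main() {
	fmt.Println(splitExact("a.b.c", 3))   // [a b c] <nil>
	fmt.Println(splitExact("a.b.c.d", 3)) // [] too many parts
}
```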
+ // parseSignedCompact parses a message in compact format. func parseSignedCompact( input string, payload []byte, signatureAlgorithms []SignatureAlgorithm, ) (*JSONWebSignature, error) { - // Three parts is two separators - if strings.Count(input, ".") != 2 { + protected, s, ok := strings.Cut(input, tokenDelim) + if !ok { // no period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + claims, sig, ok := strings.Cut(s, tokenDelim) + if !ok { // only one period found + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") + } + if strings.ContainsRune(sig, '.') { // too many periods found return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } - parts := strings.SplitN(input, ".", 3) - if parts[1] != "" && payload != nil { + if claims != "" && payload != nil { return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") } - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) + rawProtected, err := base64.RawURLEncoding.DecodeString(protected) if err != nil { return nil, err } if payload == nil { - payload, err = base64.RawURLEncoding.DecodeString(parts[1]) + payload, err = base64.RawURLEncoding.DecodeString(claims) if err != nil { return nil, err } } - signature, err := base64.RawURLEncoding.DecodeString(parts[2]) + signature, err := base64.RawURLEncoding.DecodeString(sig) if err != nil { return nil, err } diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go index 09efefb26..6176e0607 100644 --- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go @@ -21,7 +21,6 @@ import ( "crypto/aes" "crypto/cipher" "crypto/hmac" - "crypto/pbkdf2" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -329,7 +328,7 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie // derive key keyLen, h := getPbkdf2Params(alg) - key, err := pbkdf2.Key(h, string(ctx.key), salt, ctx.p2c, keyLen) + key, err := pbkdf2Key(h, string(ctx.key), salt, ctx.p2c, keyLen) if err != nil { return recipientInfo{}, nil } @@ -434,7 +433,7 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien // derive key keyLen, h := getPbkdf2Params(alg) - key, err := pbkdf2.Key(h, string(ctx.key), salt, p2c, keyLen) + key, err := pbkdf2Key(h, string(ctx.key), salt, p2c, keyLen) if err != nil { return nil, err } diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go new file mode 100644 index 000000000..6c5a4e7f2 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go @@ -0,0 +1,28 @@ +//go:build go1.24 + +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto/pbkdf2" + "hash" +) + +func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) { + return pbkdf2.Key(h, password, salt, iter, keyLen) +} diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go new file mode 100644 index 000000000..bdfc3d766 --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go @@ -0,0 +1,29 @@ +//go:build !go1.24 + +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "hash" + + "golang.org/x/crypto/pbkdf2" +) + +func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) { + return pbkdf2.Key([]byte(password), salt, iter, keyLen, h), nil +} diff --git a/vendor/github.com/grafana/regexp/.gitignore b/vendor/github.com/grafana/regexp/.gitignore new file mode 100644 index 000000000..66fd13c90 --- /dev/null +++ b/vendor/github.com/grafana/regexp/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/grafana/regexp/LICENSE b/vendor/github.com/grafana/regexp/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/grafana/regexp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
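Aside: the `symmetric_go124.go` / `symmetric_legacy.go` pair added above is the usual build-tag shim pattern: one internal wrapper (`pbkdf2Key`) with two implementations selected by `//go:build go1.24` and `//go:build !go1.24`, hiding the different call shapes of the standard library's new `crypto/pbkdf2` and the older `golang.org/x/crypto/pbkdf2`. A sketch of the difference being papered over (inputs are throwaway example values):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2" // the pre-Go-1.24 path used by symmetric_legacy.go
)

func main() {
	// golang.org/x/crypto/pbkdf2: takes a []byte password and returns the
	// key directly, with no error value.
	key := pbkdf2.Key([]byte("password"), []byte("salt"), 4096, 32, sha256.New)
	fmt.Printf("%x\n", key)

	// From Go 1.24 the stdlib equivalent (import "crypto/pbkdf2") is:
	//   key, err := pbkdf2.Key(sha256.New, "password", []byte("salt"), 4096, 32)
	// The shim's pbkdf2Key normalizes both to the (key, error) shape.
}
```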
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md new file mode 100644 index 000000000..756e60dcf --- /dev/null +++ b/vendor/github.com/grafana/regexp/README.md @@ -0,0 +1,12 @@ +# Grafana Go regexp package +This repo is a fork of the upstream Go `regexp` package, with some code optimisations to make it run faster. + +All the optimisations have been submitted upstream, but not yet merged. + +All semantics are the same, and the optimised code passes all tests from upstream. + +The `main` branch is non-optimised: switch over to [`speedup`](https://github.com/grafana/regexp/tree/speedup) branch for the improved code. + +## Benchmarks: + +![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go new file mode 100644 index 000000000..7c37c66a8 --- /dev/null +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// backtrack is a regular expression search with submatch +// tracking for small regular expressions and texts. It allocates +// a bit vector with (length of input) * (length of prog) bits, +// to make sure it never explores the same (character position, instruction) +// state multiple times. This limits the search to run in time linear in +// the length of the test. +// +// backtrack is a fast replacement for the NFA code on small +// regexps when onepass cannot be used. + +package regexp + +import ( + "regexp/syntax" + "sync" +) + +// A job is an entry on the backtracker's job stack. It holds +// the instruction pc and the position in the input. +type job struct { + pc uint32 + arg bool + pos int +} + +const ( + visitedBits = 32 + maxBacktrackProg = 500 // len(prog.Inst) <= max + maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits) +) + +// bitState holds state for the backtracker. +type bitState struct { + end int + cap []int + matchcap []int + jobs []job + visited []uint32 + + inputs inputs +} + +var bitStatePool sync.Pool + +func newBitState() *bitState { + b, ok := bitStatePool.Get().(*bitState) + if !ok { + b = new(bitState) + } + return b +} + +func freeBitState(b *bitState) { + b.inputs.clear() + bitStatePool.Put(b) +} + +// maxBitStateLen returns the maximum length of a string to search with +// the backtracker using prog. +func maxBitStateLen(prog *syntax.Prog) int { + if !shouldBacktrack(prog) { + return 0 + } + return maxBacktrackVector / len(prog.Inst) +} + +// shouldBacktrack reports whether the program is too +// long for the backtracker to run. +func shouldBacktrack(prog *syntax.Prog) bool { + return len(prog.Inst) <= maxBacktrackProg +} + +// reset resets the state of the backtracker. +// end is the end position in the input. +// ncap is the number of captures. 
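Aside: the backtracker's linear-time bound comes entirely from the `visited` bit vector, which `reset` (below) sizes to `len(prog.Inst) * (end+1)` bits so that each `(pc, pos)` state is explored at most once. A standalone sketch of that indexing math, mirroring `shouldVisit` (the `mark` helper name is mine):

```go
package main

import "fmt"

const visitedBits = 32

// mark reports whether state (pc, pos) was already seen, recording it if not.
// States are numbered pc*(end+1)+pos, where end is the input length in bytes.
func mark(visited []uint32, pc uint32, pos, end int) bool {
	n := uint(int(pc)*(end+1) + pos)
	if visited[n/visitedBits]&(1<<(n%visitedBits)) != 0 {
		return true
	}
	visited[n/visitedBits] |= 1 << (n % visitedBits)
	return false
}

func main() {
	prog, end := 4, 10 // 4 instructions, 10 bytes of input
	size := (prog*(end+1) + visitedBits - 1) / visitedBits
	visited := make([]uint32, size)
	fmt.Println(mark(visited, 2, 5, end)) // false: first visit
	fmt.Println(mark(visited, 2, 5, end)) // true: deduplicated
}
```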
+func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { + b.end = end + + if cap(b.jobs) == 0 { + b.jobs = make([]job, 0, 256) + } else { + b.jobs = b.jobs[:0] + } + + visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits + if cap(b.visited) < visitedSize { + b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) + } else { + b.visited = b.visited[:visitedSize] + clear(b.visited) // set to 0 + } + + if cap(b.cap) < ncap { + b.cap = make([]int, ncap) + } else { + b.cap = b.cap[:ncap] + } + for i := range b.cap { + b.cap[i] = -1 + } + + if cap(b.matchcap) < ncap { + b.matchcap = make([]int, ncap) + } else { + b.matchcap = b.matchcap[:ncap] + } + for i := range b.matchcap { + b.matchcap[i] = -1 + } +} + +// shouldVisit reports whether the combination of (pc, pos) has not +// been visited yet. +func (b *bitState) shouldVisit(pc uint32, pos int) bool { + n := uint(int(pc)*(b.end+1) + pos) + if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 { + return false + } + b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1)) + return true +} + +// push pushes (pc, pos, arg) onto the job stack if it should be +// visited. +func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) { + // Only check shouldVisit when arg is false. + // When arg is true, we are continuing a previous visit. + if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) { + b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos}) + } +} + +// tryBacktrack runs a backtracking search starting at pos. +func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { + longest := re.longest + + b.push(re, pc, pos, false) + for len(b.jobs) > 0 { + l := len(b.jobs) - 1 + // Pop job off the stack. + pc := b.jobs[l].pc + pos := b.jobs[l].pos + arg := b.jobs[l].arg + b.jobs = b.jobs[:l] + + // Optimization: rather than push and pop, + // code that is going to Push and continue + // the loop simply updates ip, p, and arg + // and jumps to CheckAndLoop. We have to + // do the ShouldVisit check that Push + // would have, but we avoid the stack + // manipulation. + goto Skip + CheckAndLoop: + if !b.shouldVisit(pc, pos) { + continue + } + Skip: + + inst := &re.prog.Inst[pc] + + switch inst.Op { + default: + panic("bad inst") + case syntax.InstFail: + panic("unexpected InstFail") + case syntax.InstAlt: + // Cannot just + // b.push(inst.Out, pos, false) + // b.push(inst.Arg, pos, false) + // If during the processing of inst.Out, we encounter + // inst.Arg via another path, we want to process it then. + // Pushing it here will inhibit that. Instead, re-push + // inst with arg==true as a reminder to push inst.Arg out + // later. + if arg { + // Finished inst.Out; try inst.Arg. + arg = false + pc = inst.Arg + goto CheckAndLoop + } else { + b.push(re, pc, pos, true) + pc = inst.Out + goto CheckAndLoop + } + + case syntax.InstAltMatch: + // One opcode consumes runes; the other leads to match. + switch re.prog.Inst[inst.Out].Op { + case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + // inst.Arg is the match. 
+ b.push(re, inst.Arg, pos, false) + pc = inst.Arg + pos = b.end + goto CheckAndLoop + } + // inst.Out is the match - non-greedy + b.push(re, inst.Out, b.end, false) + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRune: + r, width := i.step(pos) + if !inst.MatchRune(r) { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRune1: + r, width := i.step(pos) + if r != inst.Rune[0] { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRuneAnyNotNL: + r, width := i.step(pos) + if r == '\n' || r == endOfText { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRuneAny: + r, width := i.step(pos) + if r == endOfText { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstCapture: + if arg { + // Finished inst.Out; restore the old value. + b.cap[inst.Arg] = pos + continue + } else { + if inst.Arg < uint32(len(b.cap)) { + // Capture pos to register, but save old value. + b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done. + b.cap[inst.Arg] = pos + } + pc = inst.Out + goto CheckAndLoop + } + + case syntax.InstEmptyWidth: + flag := i.context(pos) + if !flag.match(syntax.EmptyOp(inst.Arg)) { + continue + } + pc = inst.Out + goto CheckAndLoop + + case syntax.InstNop: + pc = inst.Out + goto CheckAndLoop + + case syntax.InstMatch: + // We found a match. If the caller doesn't care + // where the match is, no point going further. + if len(b.cap) == 0 { + return true + } + + // Record best match so far. + // Only need to check end point, because this entire + // call is only considering one start position. + if len(b.cap) > 1 { + b.cap[1] = pos + } + if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) { + copy(b.matchcap, b.cap) + } + + // If going for first match, we're done. + if !longest { + return true + } + + // If we used the entire text, no longer match is possible. + if pos == b.end { + return true + } + + // Otherwise, continue on in hope of a longer match. + continue + } + } + + return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0 +} + +// backtrack runs a backtracking search of prog on the input starting at pos. +func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int { + startCond := re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return nil + } + if startCond&syntax.EmptyBeginText != 0 && pos != 0 { + // Anchored match, past beginning of text. + return nil + } + + b := newBitState() + i, end := b.inputs.init(nil, ib, is) + b.reset(re.prog, end, ncap) + + // Anchored search must start at the beginning of the input + if startCond&syntax.EmptyBeginText != 0 { + if len(b.cap) > 0 { + b.cap[0] = pos + } + if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + freeBitState(b) + return nil + } + } else { + + // Unanchored search, starting from each possible text position. + // Notice that we have to try the empty string at the end of + // the text, so the loop condition is pos <= end, not pos < end. + // This looks like it's quadratic in the size of the text, + // but we are not clearing visited between calls to TrySearch, + // so no work is duplicated and it ends up still being linear. + width := -1 + for ; pos <= end && width != 0; pos += width { + if len(re.prefix) > 0 { + // Match requires literal prefix; fast search for it. 
+ advance := i.index(re, pos) + if advance < 0 { + freeBitState(b) + return nil + } + pos += advance + } + + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) + } + freeBitState(b) + return nil + } + +Match: + dstCap = append(dstCap, b.matchcap...) + freeBitState(b) + return dstCap +} diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go new file mode 100644 index 000000000..3fc4b684f --- /dev/null +++ b/vendor/github.com/grafana/regexp/exec.go @@ -0,0 +1,554 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "io" + "regexp/syntax" + "sync" +) + +// A queue is a 'sparse array' holding pending threads of execution. +// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html +type queue struct { + sparse []uint32 + dense []entry +} + +// An entry is an entry on a queue. +// It holds both the instruction pc and the actual thread. +// Some queue entries are just place holders so that the machine +// knows it has considered that pc. Such entries have t == nil. +type entry struct { + pc uint32 + t *thread +} + +// A thread is the state of a single path through the machine: +// an instruction and a corresponding capture array. +// See https://swtch.com/~rsc/regexp/regexp2.html +type thread struct { + inst *syntax.Inst + cap []int +} + +// A machine holds all the state during an NFA simulation for p. +type machine struct { + re *Regexp // corresponding Regexp + p *syntax.Prog // compiled program + q0, q1 queue // two queues for runq, nextq + pool []*thread // pool of available threads + matched bool // whether a match was found + matchcap []int // capture information for the match + + inputs inputs +} + +type inputs struct { + // cached inputs, to avoid allocation + bytes inputBytes + string inputString + reader inputReader +} + +func (i *inputs) newBytes(b []byte) input { + i.bytes.str = b + return &i.bytes +} + +func (i *inputs) newString(s string) input { + i.string.str = s + return &i.string +} + +func (i *inputs) newReader(r io.RuneReader) input { + i.reader.r = r + i.reader.atEOT = false + i.reader.pos = 0 + return &i.reader +} + +func (i *inputs) clear() { + // We need to clear 1 of these. + // Avoid the expense of clearing the others (pointer write barrier). + if i.bytes.str != nil { + i.bytes.str = nil + } else if i.reader.r != nil { + i.reader.r = nil + } else { + i.string.str = "" + } +} + +func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) { + if r != nil { + return i.newReader(r), 0 + } + if b != nil { + return i.newBytes(b), len(b) + } + return i.newString(s), len(s) +} + +func (m *machine) init(ncap int) { + for _, t := range m.pool { + t.cap = t.cap[:ncap] + } + m.matchcap = m.matchcap[:ncap] +} + +// alloc allocates a new thread with the given instruction. +// It uses the free pool if possible. +func (m *machine) alloc(i *syntax.Inst) *thread { + var t *thread + if n := len(m.pool); n > 0 { + t = m.pool[n-1] + m.pool = m.pool[:n-1] + } else { + t = new(thread) + t.cap = make([]int, len(m.matchcap), cap(m.matchcap)) + } + t.inst = i + return t +} + +// A lazyFlag is a lazily-evaluated syntax.EmptyOp, +// for checking zero-width flags like ^ $ \A \z \B \b. 
+// It records the pair of relevant runes and does not +// determine the implied flags until absolutely necessary +// (most of the time, that means never). +type lazyFlag uint64 + +func newLazyFlag(r1, r2 rune) lazyFlag { + return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2))) +} + +func (f lazyFlag) match(op syntax.EmptyOp) bool { + if op == 0 { + return true + } + r1 := rune(f >> 32) + if op&syntax.EmptyBeginLine != 0 { + if r1 != '\n' && r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginLine + } + if op&syntax.EmptyBeginText != 0 { + if r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginText + } + if op == 0 { + return true + } + r2 := rune(f) + if op&syntax.EmptyEndLine != 0 { + if r2 != '\n' && r2 >= 0 { + return false + } + op &^= syntax.EmptyEndLine + } + if op&syntax.EmptyEndText != 0 { + if r2 >= 0 { + return false + } + op &^= syntax.EmptyEndText + } + if op == 0 { + return true + } + if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) { + op &^= syntax.EmptyWordBoundary + } else { + op &^= syntax.EmptyNoWordBoundary + } + return op == 0 +} + +// match runs the machine over the input starting at pos. +// It reports whether a match was found. +// If so, m.matchcap holds the submatch information. +func (m *machine) match(i input, pos int) bool { + startCond := m.re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return false + } + m.matched = false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + runq, nextq := &m.q0, &m.q1 + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + for { + if len(runq.dense) == 0 { + if startCond&syntax.EmptyBeginText != 0 && pos != 0 { + // Anchored match, past beginning of text. + break + } + if m.matched { + // Have match; finished exploring alternatives. + break + } + if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + advance := i.index(m.re, pos) + if advance < 0 { + break + } + pos += advance + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + } + } + if !m.matched { + if len(m.matchcap) > 0 { + m.matchcap[0] = pos + } + m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil) + } + flag = newLazyFlag(r, r1) + m.step(runq, nextq, pos, pos+width, r, &flag) + if width == 0 { + break + } + if len(m.matchcap) == 0 && m.matched { + // Found a match and not paying attention + // to where it is, so any match will do. + break + } + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + runq, nextq = nextq, runq + } + m.clear(nextq) + return m.matched +} + +// clear frees all threads on the thread queue. +func (m *machine) clear(q *queue) { + for _, d := range q.dense { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + q.dense = q.dense[:0] +} + +// step executes one step of the machine, running each of the threads +// on runq and appending new threads to nextq. +// The step processes the rune c (which may be endOfText), +// which starts at position pos and ends at nextPos. +// nextCond gives the setting for the empty-width flags after c. 
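Aside: `lazyFlag` above packs the two context runes into a single uint64 (`r1` in the high half, `r2` in the low half) so the zero-width checks are deferred until an `InstEmptyWidth` actually consults them, which for most inputs is never. A tiny sketch of just the packing trick (`pack` and `unpack` are illustrative names):

```go
package main

import "fmt"

// pack stores r1 in the upper 32 bits and r2 in the lower 32 bits.
func pack(r1, r2 rune) uint64 { return uint64(r1)<<32 | uint64(uint32(r2)) }

// unpack recovers both runes; the int32 conversions restore the sign,
// so the endOfText sentinel (-1) round-trips.
func unpack(f uint64) (rune, rune) { return rune(int32(f >> 32)), rune(int32(f)) }

func main() {
	f := pack(-1, 'a') // -1 plays the role of endOfText
	r1, r2 := unpack(f)
	fmt.Println(r1, r2) // -1 97
}
```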
+func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) { + longest := m.re.longest + for j := 0; j < len(runq.dense); j++ { + d := &runq.dense[j] + t := d.t + if t == nil { + continue + } + if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] { + m.pool = append(m.pool, t) + continue + } + i := t.inst + add := false + switch i.Op { + default: + panic("bad inst") + + case syntax.InstMatch: + if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) { + t.cap[1] = pos + copy(m.matchcap, t.cap) + } + if !longest { + // First-match mode: cut off all lower-priority threads. + for _, d := range runq.dense[j+1:] { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + runq.dense = runq.dense[:0] + } + m.matched = true + + case syntax.InstRune: + add = i.MatchRune(c) + case syntax.InstRune1: + add = c == i.Rune[0] + case syntax.InstRuneAny: + add = true + case syntax.InstRuneAnyNotNL: + add = c != '\n' + } + if add { + t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t) + } + if t != nil { + m.pool = append(m.pool, t) + } + } + runq.dense = runq.dense[:0] +} + +// add adds an entry to q for pc, unless the q already has such an entry. +// It also recursively adds an entry for all instructions reachable from pc by following +// empty-width conditions satisfied by cond. pos gives the current position +// in the input. +func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread { +Again: + if pc == 0 { + return t + } + if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc { + return t + } + + j := len(q.dense) + q.dense = q.dense[:j+1] + d := &q.dense[j] + d.t = nil + d.pc = pc + q.sparse[pc] = uint32(j) + + i := &m.p.Inst[pc] + switch i.Op { + default: + panic("unhandled") + case syntax.InstFail: + // nothing + case syntax.InstAlt, syntax.InstAltMatch: + t = m.add(q, i.Out, pos, cap, cond, t) + pc = i.Arg + goto Again + case syntax.InstEmptyWidth: + if cond.match(syntax.EmptyOp(i.Arg)) { + pc = i.Out + goto Again + } + case syntax.InstNop: + pc = i.Out + goto Again + case syntax.InstCapture: + if int(i.Arg) < len(cap) { + opos := cap[i.Arg] + cap[i.Arg] = pos + m.add(q, i.Out, pos, cap, cond, nil) + cap[i.Arg] = opos + } else { + pc = i.Out + goto Again + } + case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + if t == nil { + t = m.alloc(i) + } else { + t.inst = i + } + if len(cap) > 0 && &t.cap[0] != &cap[0] { + copy(t.cap, cap) + } + d.t = t + t = nil + } + return t +} + +type onePassMachine struct { + inputs inputs + matchcap []int +} + +var onePassPool sync.Pool + +func newOnePassMachine() *onePassMachine { + m, ok := onePassPool.Get().(*onePassMachine) + if !ok { + m = new(onePassMachine) + } + return m +} + +func freeOnePassMachine(m *onePassMachine) { + m.inputs.clear() + onePassPool.Put(m) +} + +// doOnePass implements r.doExecute using the one-pass execution engine. 
+func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int { + startCond := re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return nil + } + + m := newOnePassMachine() + if cap(m.matchcap) < ncap { + m.matchcap = make([]int, ncap) + } else { + m.matchcap = m.matchcap[:ncap] + } + + matched := false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + pc := re.onepass.Start + inst := &re.onepass.Inst[pc] + // If there is a simple literal prefix, skip over it. + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + flag = i.context(pos) + pc = int(re.prefixEnd) + } + for { + inst = &re.onepass.Inst[pc] + pc = int(inst.Out) + switch inst.Op { + default: + panic("bad inst") + case syntax.InstMatch: + matched = true + if len(m.matchcap) > 0 { + m.matchcap[0] = 0 + m.matchcap[1] = pos + } + goto Return + case syntax.InstRune: + if !inst.MatchRune(r) { + goto Return + } + case syntax.InstRune1: + if r != inst.Rune[0] { + goto Return + } + case syntax.InstRuneAny: + // Nothing + case syntax.InstRuneAnyNotNL: + if r == '\n' { + goto Return + } + // peek at the input rune to see which branch of the Alt to take + case syntax.InstAlt, syntax.InstAltMatch: + pc = int(onePassNext(inst, r)) + continue + case syntax.InstFail: + goto Return + case syntax.InstNop: + continue + case syntax.InstEmptyWidth: + if !flag.match(syntax.EmptyOp(inst.Arg)) { + goto Return + } + continue + case syntax.InstCapture: + if int(inst.Arg) < len(m.matchcap) { + m.matchcap[inst.Arg] = pos + } + continue + } + if width == 0 { + break + } + flag = newLazyFlag(r, r1) + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + } + +Return: + if !matched { + freeOnePassMachine(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + freeOnePassMachine(m) + return dstCap +} + +// doMatch reports whether either r, b or s match the regexp. +func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool { + return re.doExecute(r, b, s, 0, 0, nil) != nil +} + +// doExecute finds the leftmost match in the input, appends the position +// of its subexpressions to dstCap and returns dstCap. +// +// nil is returned if no matches are found and non-nil if matches are found. +func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int { + if dstCap == nil { + // Make sure 'return dstCap' is non-nil. + dstCap = arrayNoInts[:0:0] + } + + if r == nil && len(b)+len(s) < re.minInputLen { + return nil + } + + if re.onepass != nil { + return re.doOnePass(r, b, s, pos, ncap, dstCap) + } + if r == nil && len(b)+len(s) < re.maxBitStateLen { + return re.backtrack(b, s, pos, ncap, dstCap) + } + + m := re.get() + i, _ := m.inputs.init(r, b, s) + + m.init(ncap) + if !m.match(i, pos) { + re.put(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + re.put(m) + return dstCap +} + +// arrayNoInts is returned by doExecute match if nil dstCap is passed +// to it with ncap=0. 
+var arrayNoInts [0]int diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go new file mode 100644 index 000000000..53cbd9583 --- /dev/null +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -0,0 +1,500 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "regexp/syntax" + "slices" + "strings" + "unicode" + "unicode/utf8" +) + +// "One-pass" regexp execution. +// Some regexps can be analyzed to determine that they never need +// backtracking: they are guaranteed to run in one pass over the string +// without bothering to save all the usual NFA state. +// Detect those and execute them more quickly. + +// A onePassProg is a compiled one-pass regular expression program. +// It is the same as syntax.Prog except for the use of onePassInst. +type onePassProg struct { + Inst []onePassInst + Start int // index of start instruction + NumCap int // number of InstCapture insts in re +} + +// A onePassInst is a single instruction in a one-pass regular expression program. +// It is the same as syntax.Inst except for the new 'Next' field. +type onePassInst struct { + syntax.Inst + Next []uint32 +} + +// onePassPrefix returns a literal string that all matches for the +// regexp must start with. Complete is true if the prefix +// is the entire match. Pc is the index of the last rune instruction +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { + i := &p.Inst[p.Start] + if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + pc = i.Out + i = &p.Inst[pc] + for i.Op == syntax.InstNop { + pc = i.Out + i = &p.Inst[pc] + } + // Avoid allocation of buffer if prefix is empty. + if iop(i) != syntax.InstRune || len(i.Rune) != 1 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + + // Have prefix; gather characters. + var buf strings.Builder + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { + buf.WriteRune(i.Rune[0]) + pc, i = i.Out, &p.Inst[i.Out] + } + if i.Op == syntax.InstEmptyWidth && + syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 && + p.Inst[i.Out].Op == syntax.InstMatch { + complete = true + } + return buf.String(), complete, pc +} + +// onePassNext selects the next actionable state of the prog, based on the input character. +// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. +// One of the alternates may ultimately lead without input to end of line. If the instruction +// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. +func onePassNext(i *onePassInst, r rune) uint32 { + next := i.MatchRunePos(r) + if next >= 0 { + return i.Next[next] + } + if i.Op == syntax.InstAltMatch { + return i.Out + } + return 0 +} + +func iop(i *syntax.Inst) syntax.InstOp { + op := i.Op + switch op { + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + op = syntax.InstRune + } + return op +} + +// Sparse Array implementation is used as a queueOnePass. 
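Aside: `queueOnePass` below is the sparse-set ("sparse array") structure referenced in exec.go: `sparse` may hold stale garbage, so membership cross-checks it against `dense`, and `clear` is O(1) because nothing needs zeroing. A standalone sketch of the invariant:

```go
package main

import "fmt"

type sparseSet struct {
	sparse []uint32 // sparse[u] = position of u in dense (may be stale)
	dense  []uint32 // dense[0:size] = members, in insertion order
	size   uint32
}

func newSparseSet(n int) *sparseSet {
	return &sparseSet{sparse: make([]uint32, n), dense: make([]uint32, n)}
}

func (s *sparseSet) contains(u uint32) bool {
	// sparse[u] is trustworthy only if dense confirms it points back at u.
	return s.sparse[u] < s.size && s.dense[s.sparse[u]] == u
}

func (s *sparseSet) insert(u uint32) {
	if !s.contains(u) {
		s.sparse[u] = s.size
		s.dense[s.size] = u
		s.size++
	}
}

func (s *sparseSet) clear() { s.size = 0 } // O(1): stale entries are harmless

func main() {
	s := newSparseSet(16)
	s.insert(3)
	s.insert(7)
	fmt.Println(s.contains(3), s.contains(5)) // true false
	s.clear()
	fmt.Println(s.contains(3)) // false
}
```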
+type queueOnePass struct { + sparse []uint32 + dense []uint32 + size, nextIndex uint32 +} + +func (q *queueOnePass) empty() bool { + return q.nextIndex >= q.size +} + +func (q *queueOnePass) next() (n uint32) { + n = q.dense[q.nextIndex] + q.nextIndex++ + return +} + +func (q *queueOnePass) clear() { + q.size = 0 + q.nextIndex = 0 +} + +func (q *queueOnePass) contains(u uint32) bool { + if u >= uint32(len(q.sparse)) { + return false + } + return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u +} + +func (q *queueOnePass) insert(u uint32) { + if !q.contains(u) { + q.insertNew(u) + } +} + +func (q *queueOnePass) insertNew(u uint32) { + if u >= uint32(len(q.sparse)) { + return + } + q.sparse[u] = q.size + q.dense[q.size] = u + q.size++ +} + +func newQueue(size int) (q *queueOnePass) { + return &queueOnePass{ + sparse: make([]uint32, size), + dense: make([]uint32, size), + } +} + +// mergeRuneSets merges two non-intersecting runesets, and returns the merged result, +// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index +// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a +// NextIp array with the single element mergeFailed is returned. +// The code assumes that both inputs contain ordered and non-intersecting rune pairs. +const mergeFailed = uint32(0xffffffff) + +var ( + noRune = []rune{} + noNext = []uint32{mergeFailed} +) + +func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) { + leftLen := len(*leftRunes) + rightLen := len(*rightRunes) + if leftLen&0x1 != 0 || rightLen&0x1 != 0 { + panic("mergeRuneSets odd length []rune") + } + var ( + lx, rx int + ) + merged := make([]rune, 0) + next := make([]uint32, 0) + ok := true + defer func() { + if !ok { + merged = nil + next = nil + } + }() + + ix := -1 + extend := func(newLow *int, newArray *[]rune, pc uint32) bool { + if ix > 0 && (*newArray)[*newLow] <= merged[ix] { + return false + } + merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1]) + *newLow += 2 + ix += 2 + next = append(next, pc) + return true + } + + for lx < leftLen || rx < rightLen { + switch { + case rx >= rightLen: + ok = extend(&lx, leftRunes, leftPC) + case lx >= leftLen: + ok = extend(&rx, rightRunes, rightPC) + case (*rightRunes)[rx] < (*leftRunes)[lx]: + ok = extend(&rx, rightRunes, rightPC) + default: + ok = extend(&lx, leftRunes, leftPC) + } + if !ok { + return noRune, noNext + } + } + return merged, next +} + +// cleanupOnePass drops working memory, and restores certain shortcut instructions. +func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { + for ix, instOriginal := range original.Inst { + switch instOriginal.Op { + case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune: + case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail: + prog.Inst[ix].Next = nil + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + prog.Inst[ix].Next = nil + prog.Inst[ix] = onePassInst{Inst: instOriginal} + } + } +} + +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. +func onePassCopy(prog *syntax.Prog) *onePassProg { + p := &onePassProg{ + Start: prog.Start, + NumCap: prog.NumCap, + Inst: make([]onePassInst, len(prog.Inst)), + } + for i, inst := range prog.Inst { + p.Inst[i] = onePassInst{Inst: inst} + } + + // rewrites one or more common Prog constructs that enable some otherwise + // non-onepass Progs to be onepass. 
A:BD (for example) means an InstAlt at + // ip A, that points to ips B & C. + // A:BC + B:DA => A:BC + B:CD + // A:BC + B:DC => A:DC + B:DC + for pc := range p.Inst { + switch p.Inst[pc].Op { + default: + continue + case syntax.InstAlt, syntax.InstAltMatch: + // A:Bx + B:Ay + p_A_Other := &p.Inst[pc].Out + p_A_Alt := &p.Inst[pc].Arg + // make sure a target is another Alt + instAlt := p.Inst[*p_A_Alt] + if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { + p_A_Alt, p_A_Other = p_A_Other, p_A_Alt + instAlt = p.Inst[*p_A_Alt] + if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) { + continue + } + } + instOther := p.Inst[*p_A_Other] + // Analyzing both legs pointing to Alts is for another day + if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch { + // too complicated + continue + } + // simple empty transition loop + // A:BC + B:DA => A:BC + B:DC + p_B_Alt := &p.Inst[*p_A_Alt].Out + p_B_Other := &p.Inst[*p_A_Alt].Arg + patch := false + if instAlt.Out == uint32(pc) { + patch = true + } else if instAlt.Arg == uint32(pc) { + patch = true + p_B_Alt, p_B_Other = p_B_Other, p_B_Alt + } + if patch { + *p_B_Alt = *p_A_Other + } + + // empty transition to common target + // A:BC + B:DC => A:DC + B:DC + if *p_A_Other == *p_B_Alt { + *p_A_Alt = *p_B_Other + } + } + } + return p +} + +var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} +var anyRune = []rune{0, unicode.MaxRune} + +// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt, +// the match engine can always tell which branch to take. The routine may modify +// p if it is turned into a onepass Prog. If it isn't possible for this to be a +// onepass Prog, the Prog nil is returned. makeOnePass is recursive +// to the size of the Prog. +func makeOnePass(p *onePassProg) *onePassProg { + // If the machine is very long, it's not worth the time to check if we can use one pass. + if len(p.Inst) >= 1000 { + return nil + } + + var ( + instQueue = newQueue(len(p.Inst)) + visitQueue = newQueue(len(p.Inst)) + check func(uint32, []bool) bool + onePassRunes = make([][]rune, len(p.Inst)) + ) + + // check that paths from Alt instructions are unambiguous, and rebuild the new + // program as a onepass program + check = func(pc uint32, m []bool) (ok bool) { + ok = true + inst := &p.Inst[pc] + if visitQueue.contains(pc) { + return + } + visitQueue.insert(pc) + switch inst.Op { + case syntax.InstAlt, syntax.InstAltMatch: + ok = check(inst.Out, m) && check(inst.Arg, m) + // check no-input paths to InstMatch + matchOut := m[inst.Out] + matchArg := m[inst.Arg] + if matchOut && matchArg { + ok = false + break + } + // Match on empty goes in inst.Out + if matchArg { + inst.Out, inst.Arg = inst.Arg, inst.Out + matchOut, matchArg = matchArg, matchOut + } + if matchOut { + m[pc] = true + inst.Op = syntax.InstAltMatch + } + + // build a dispatch operator from the two legs of the alt. + onePassRunes[pc], inst.Next = mergeRuneSets( + &onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg) + if len(inst.Next) > 0 && inst.Next[0] == mergeFailed { + ok = false + break + } + case syntax.InstCapture, syntax.InstNop: + ok = check(inst.Out, m) + m[pc] = m[inst.Out] + // pass matching runes back through these no-ops. + onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) 
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstEmptyWidth: + ok = check(inst.Out, m) + m[pc] = m[inst.Out] + onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstMatch, syntax.InstFail: + m[pc] = inst.Op == syntax.InstMatch + case syntax.InstRune: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + if len(inst.Rune) == 0 { + onePassRunes[pc] = []rune{} + inst.Next = []uint32{inst.Out} + break + } + runes := make([]rune, 0) + if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + slices.Sort(runes) + } else { + runes = append(runes, inst.Rune...) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRune1: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + runes := []rune{} + // expand case-folded runes + if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + slices.Sort(runes) + } else { + runes = append(runes, inst.Rune[0], inst.Rune[0]) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRuneAny: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRune...) + inst.Next = []uint32{inst.Out} + case syntax.InstRuneAnyNotNL: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRuneNotNL...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + } + return + } + + instQueue.clear() + instQueue.insert(uint32(p.Start)) + m := make([]bool, len(p.Inst)) + for !instQueue.empty() { + visitQueue.clear() + pc := instQueue.next() + if !check(pc, m) { + p = nil + break + } + } + if p != nil { + for i := range p.Inst { + p.Inst[i].Rune = onePassRunes[i] + } + } + return p +} + +// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog +// can be recharacterized as a one-pass regexp program, or syntax.nil if the +// Prog cannot be converted. For a one pass prog, the fundamental condition that must +// be true is: at any InstAlt, there must be no ambiguity about what branch to take. 
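Aside: when `makeOnePass` succeeds, every `InstAlt` has been rewritten into a dispatch table: a sorted list of disjoint rune ranges (pairs in `Rune`) with a parallel `Next` array of target pcs, built by `mergeRuneSets` above; overlapping ranges are exactly the ambiguity that disqualifies a program from one-pass execution. A simplified sketch of that dispatch (a linear scan here, where `onePassNext` uses `MatchRunePos`; all names in the sketch are illustrative):

```go
package main

import "fmt"

// dispatch picks the next pc for rune r given inclusive range pairs in runes
// (runes[i], runes[i+1]) and targets in next; next's final entry is the
// no-match fallthrough.
func dispatch(runes []rune, next []uint32, r rune) uint32 {
	for i := 0; i+1 < len(runes); i += 2 {
		if r >= runes[i] && r <= runes[i+1] {
			return next[i/2]
		}
	}
	return next[len(next)-1]
}

func main() {
	// Two unambiguous branches: 'a'..'f' -> pc 3, 'g'..'z' -> pc 7, else pc 0.
	runes := []rune{'a', 'f', 'g', 'z'}
	next := []uint32{3, 7, 0}
	fmt.Println(dispatch(runes, next, 'c')) // 3
	fmt.Println(dispatch(runes, next, 'q')) // 7
	fmt.Println(dispatch(runes, next, '!')) // 0
}
```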
+func compileOnePass(prog *syntax.Prog) (p *onePassProg) { + if prog.Start == 0 { + return nil + } + // onepass regexp is anchored + if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth || + syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { + return nil + } + // every instruction leading to InstMatch must be EmptyEndText + for _, inst := range prog.Inst { + opOut := prog.Inst[inst.Out].Op + switch inst.Op { + default: + if opOut == syntax.InstMatch { + return nil + } + case syntax.InstAlt, syntax.InstAltMatch: + if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch { + return nil + } + case syntax.InstEmptyWidth: + if opOut == syntax.InstMatch { + if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText { + continue + } + return nil + } + } + } + // Creates a slightly optimized copy of the original Prog + // that cleans up some Prog idioms that block valid onepass programs + p = onePassCopy(prog) + + // checkAmbiguity on InstAlts, build onepass Prog if possible + p = makeOnePass(p) + + if p != nil { + cleanupOnePass(p, prog) + } + return p +} diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go new file mode 100644 index 000000000..d1218ad0e --- /dev/null +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -0,0 +1,1304 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package regexp implements regular expression search. +// +// The syntax of the regular expressions accepted is the same +// general syntax used by Perl, Python, and other languages. +// More precisely, it is the syntax accepted by RE2 and described at +// https://golang.org/s/re2syntax, except for \C. +// For an overview of the syntax, see the [regexp/syntax] package. +// +// The regexp implementation provided by this package is +// guaranteed to run in time linear in the size of the input. +// (This is a property not guaranteed by most open source +// implementations of regular expressions.) For more information +// about this property, see +// +// https://swtch.com/~rsc/regexp/regexp1.html +// +// or any book about automata theory. +// +// All characters are UTF-8-encoded code points. +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence +// is treated as if it encoded utf8.RuneError (U+FFFD). +// +// There are 16 methods of [Regexp] that match a regular expression and identify +// the matched text. Their names are matched by this regular expression: +// +// Find(All)?(String)?(Submatch)?(Index)? +// +// If 'All' is present, the routine matches successive non-overlapping +// matches of the entire expression. Empty matches abutting a preceding +// match are ignored. The return value is a slice containing the successive +// return values of the corresponding non-'All' routine. These routines take +// an extra integer argument, n. If n >= 0, the function returns at most n +// matches/submatches; otherwise, it returns all of them. +// +// If 'String' is present, the argument is a string; otherwise it is a slice +// of bytes; return values are adjusted as appropriate. +// +// If 'Submatch' is present, the return value is a slice identifying the +// successive submatches of the expression. 
Submatches are matches of +// parenthesized subexpressions (also known as capturing groups) within the +// regular expression, numbered from left to right in order of opening +// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is +// the match of the first parenthesized subexpression, and so on. +// +// If 'Index' is present, matches and submatches are identified by byte index +// pairs within the input string: result[2*n:2*n+2] identifies the indexes of +// the nth submatch. The pair for n==0 identifies the match of the entire +// expression. If 'Index' is not present, the match is identified by the text +// of the match/submatch. If an index is negative or text is nil, it means that +// subexpression did not match any string in the input. For 'String' versions +// an empty string means either no match or an empty match. +// +// There is also a subset of the methods that can be applied to text read +// from a RuneReader: +// +// MatchReader, FindReaderIndex, FindReaderSubmatchIndex +// +// This set may grow. Note that regular expression matches may need to +// examine text beyond the text returned by a match, so the methods that +// match text from a RuneReader may read arbitrarily far into the input +// before returning. +// +// (There are a few other methods that do not match this pattern.) +package regexp + +import ( + "bytes" + "io" + "regexp/syntax" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Regexp is the representation of a compiled regular expression. +// A Regexp is safe for concurrent use by multiple goroutines, +// except for configuration methods, such as [Regexp.Longest]. +type Regexp struct { + expr string // as passed to Compile + prog *syntax.Prog // compiled program + onepass *onePassProg // onepass program or nil + numSubexp int + maxBitStateLen int + subexpNames []string + prefix string // required prefix in unanchored matches + prefixBytes []byte // prefix, as a []byte + prefixRune rune // first rune in prefix + prefixEnd uint32 // pc for last rune in prefix + mpool int // pool for machines + matchcap int // size of recorded match lengths + prefixComplete bool // prefix is the entire regexp + cond syntax.EmptyOp // empty-width conditions required at start of match + minInputLen int // minimum length of the input in bytes + + // This field can be modified by the Longest method, + // but it is otherwise read-only. + longest bool // whether regexp prefers leftmost-longest match +} + +// String returns the source text used to compile the regular expression. +func (re *Regexp) String() string { + return re.expr +} + +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. +// +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, +// giving each goroutine its own copy helped to avoid lock contention. +// As of Go 1.12, using Copy is no longer necessary to avoid lock contention. +// Copy may still be appropriate if the reason for its use is to make +// two copies with different [Regexp.Longest] settings. +func (re *Regexp) Copy() *Regexp { + re2 := *re + return &re2 +} + +// Compile parses a regular expression and returns, if successful, +// a [Regexp] object that can be used to match against text. +// +// When matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses the one that a backtracking search would have found first. 
+// This so-called leftmost-first matching is the same semantics +// that Perl, Python, and other implementations use, although this +// package implements it without the expense of backtracking. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. +func Compile(expr string) (*Regexp, error) { + return compile(expr, syntax.Perl, false) +} + +// CompilePOSIX is like [Compile] but restricts the regular expression +// to POSIX ERE (egrep) syntax and changes the match semantics to +// leftmost-longest. +// +// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This so-called leftmost-longest matching is the same semantics +// that early regular expression implementations used and that POSIX +// specifies. +// +// However, there can be multiple leftmost-longest matches, with different +// submatch choices, and here this package diverges from POSIX. +// Among the possible leftmost-longest matches, this package chooses +// the one that a backtracking search would have found first, while POSIX +// specifies that the match be chosen to maximize the length of the first +// subexpression, then the second, and so on from left to right. +// The POSIX rule is computationally prohibitive and not even well-defined. +// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details. +func CompilePOSIX(expr string) (*Regexp, error) { + return compile(expr, syntax.POSIX, true) +} + +// Longest makes future searches prefer the leftmost-longest match. +// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This method modifies the [Regexp] and may not be called concurrently +// with any other methods. +func (re *Regexp) Longest() { + re.longest = true +} + +func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { + re, err := syntax.Parse(expr, mode) + if err != nil { + return nil, err + } + maxCap := re.MaxCap() + capNames := re.CapNames() + + re = re.Simplify() + prog, err := syntax.Compile(re) + if err != nil { + return nil, err + } + matchcap := prog.NumCap + if matchcap < 2 { + matchcap = 2 + } + regexp := &Regexp{ + expr: expr, + prog: prog, + onepass: compileOnePass(prog), + numSubexp: maxCap, + subexpNames: capNames, + cond: prog.StartCond(), + longest: longest, + matchcap: matchcap, + minInputLen: minInputLen(re), + } + if regexp.onepass == nil { + regexp.prefix, regexp.prefixComplete = prog.Prefix() + regexp.maxBitStateLen = maxBitStateLen(prog) + } else { + regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) + } + if regexp.prefix != "" { + // TODO(rsc): Remove this allocation by adding + // IndexString to package bytes. + regexp.prefixBytes = []byte(regexp.prefix) + regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix) + } + + n := len(prog.Inst) + i := 0 + for matchSize[i] != 0 && matchSize[i] < n { + i++ + } + regexp.mpool = i + + return regexp, nil +} + +// Pools of *machine for use during (*Regexp).doExecute, +// split up by the size of the execution queues. +// matchPool[i] machines have queue size matchSize[i]. +// On a 64-bit system each queue entry is 16 bytes, +// so matchPool[0] has 16*2*128 = 4kB queues, etc. +// The final matchPool is a catch-all for very large queues. 
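Aside: the variables below implement size-bucketed pooling: the loop at the end of `compile` above picks the smallest `matchSize` bucket that fits `len(prog.Inst)`, so a small regexp never pins a 16k-entry queue in its pool. A standalone sketch of the same pattern (`buf`, `sizes`, and friends are illustrative names):

```go
package main

import (
	"fmt"
	"sync"
)

var sizes = [...]int{128, 512, 2048, 16384, 0} // 0 marks the catch-all bucket
var pools [len(sizes)]sync.Pool

// bucket returns the index of the smallest bucket that can hold n entries.
func bucket(n int) int {
	i := 0
	for sizes[i] != 0 && sizes[i] < n {
		i++
	}
	return i
}

type buf struct{ q []uint32 }

func get(n int) *buf {
	i := bucket(n)
	b, ok := pools[i].Get().(*buf)
	if !ok {
		size := sizes[i]
		if size == 0 {
			size = n // catch-all: allocate exactly what was asked for
		}
		b = &buf{q: make([]uint32, size)}
	}
	return b
}

func put(n int, b *buf) { pools[bucket(n)].Put(b) }

func main() {
	b := get(300)         // lands in the 512 bucket
	fmt.Println(len(b.q)) // 512
	put(300, b)
}
```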
+var (
+	matchSize = [...]int{128, 512, 2048, 16384, 0}
+	matchPool [len(matchSize)]sync.Pool
+)
+
+// get returns a machine to use for matching re.
+// It uses the re's machine cache if possible, to avoid
+// unnecessary allocation.
+func (re *Regexp) get() *machine {
+	m, ok := matchPool[re.mpool].Get().(*machine)
+	if !ok {
+		m = new(machine)
+	}
+	m.re = re
+	m.p = re.prog
+	if cap(m.matchcap) < re.matchcap {
+		m.matchcap = make([]int, re.matchcap)
+		for _, t := range m.pool {
+			t.cap = make([]int, re.matchcap)
+		}
+	}
+
+	// Allocate queues if needed.
+	// Or reallocate, for "large" match pool.
+	n := matchSize[re.mpool]
+	if n == 0 { // large pool
+		n = len(re.prog.Inst)
+	}
+	if len(m.q0.sparse) < n {
+		m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
+		m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
+	}
+	return m
+}
+
+// put returns a machine to the correct machine pool.
+func (re *Regexp) put(m *machine) {
+	m.re = nil
+	m.p = nil
+	m.inputs.clear()
+	matchPool[re.mpool].Put(m)
+}
+
+// minInputLen walks the regexp to find the minimum length of any matchable input.
+func minInputLen(re *syntax.Regexp) int {
+	switch re.Op {
+	default:
+		return 0
+	case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass:
+		return 1
+	case syntax.OpLiteral:
+		l := 0
+		for _, r := range re.Rune {
+			if r == utf8.RuneError {
+				l++
+			} else {
+				l += utf8.RuneLen(r)
+			}
+		}
+		return l
+	case syntax.OpCapture, syntax.OpPlus:
+		return minInputLen(re.Sub[0])
+	case syntax.OpRepeat:
+		return re.Min * minInputLen(re.Sub[0])
+	case syntax.OpConcat:
+		l := 0
+		for _, sub := range re.Sub {
+			l += minInputLen(sub)
+		}
+		return l
+	case syntax.OpAlternate:
+		l := minInputLen(re.Sub[0])
+		var lnext int
+		for _, sub := range re.Sub[1:] {
+			lnext = minInputLen(sub)
+			if lnext < l {
+				l = lnext
+			}
+		}
+		return l
+	}
+}
+
+// MustCompile is like [Compile] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompile(str string) *Regexp {
+	regexp, err := Compile(str)
+	if err != nil {
+		panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompilePOSIX(str string) *Regexp {
+	regexp, err := CompilePOSIX(str)
+	if err != nil {
+		panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
+	}
+	return regexp
+}
+
+func quote(s string) string {
+	if strconv.CanBackquote(s) {
+		return "`" + s + "`"
+	}
+	return strconv.Quote(s)
+}
+
+// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
+func (re *Regexp) NumSubexp() int {
+	return re.numSubexp
+}
+
+// SubexpNames returns the names of the parenthesized subexpressions
+// in this [Regexp]. The name for the first sub-expression is names[1],
+// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
+// Since the Regexp as a whole cannot be named, names[0] is always
+// the empty string. The slice should not be modified.
+func (re *Regexp) SubexpNames() []string {
+	return re.subexpNames
+}
+
+// SubexpIndex returns the index of the first subexpression with the given name,
+// or -1 if there is no subexpression with that name.
+//
+// Note that multiple subexpressions can be written using the same name, as in
+// (?P<bob>a+)(?P<bob>b+), which declares two subexpressions named "bob".
+// In this case, SubexpIndex returns the index of the leftmost such subexpression +// in the regular expression. +func (re *Regexp) SubexpIndex(name string) int { + if name != "" { + for i, s := range re.subexpNames { + if name == s { + return i + } + } + } + return -1 +} + +const endOfText rune = -1 + +// input abstracts different representations of the input text. It provides +// one-character lookahead. +type input interface { + step(pos int) (r rune, width int) // advance one rune + canCheckPrefix() bool // can we look ahead without losing info? + hasPrefix(re *Regexp) bool + index(re *Regexp, pos int) int + context(pos int) lazyFlag +} + +// inputString scans a string. +type inputString struct { + str string +} + +func (i *inputString) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRuneInString(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputString) canCheckPrefix() bool { + return true +} + +func (i *inputString) hasPrefix(re *Regexp) bool { + return strings.HasPrefix(i.str, re.prefix) +} + +func (i *inputString) index(re *Regexp, pos int) int { + return strings.Index(i.str[pos:], re.prefix) +} + +func (i *inputString) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRuneInString(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRuneInString(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputBytes scans a byte slice. +type inputBytes struct { + str []byte +} + +func (i *inputBytes) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRune(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputBytes) canCheckPrefix() bool { + return true +} + +func (i *inputBytes) hasPrefix(re *Regexp) bool { + return bytes.HasPrefix(i.str, re.prefixBytes) +} + +func (i *inputBytes) index(re *Regexp, pos int) int { + return bytes.Index(i.str[pos:], re.prefixBytes) +} + +func (i *inputBytes) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRune(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRune(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputReader scans a RuneReader. +type inputReader struct { + r io.RuneReader + atEOT bool + pos int +} + +func (i *inputReader) step(pos int) (rune, int) { + if !i.atEOT && pos != i.pos { + return endOfText, 0 + + } + r, w, err := i.r.ReadRune() + if err != nil { + i.atEOT = true + return endOfText, 0 + } + i.pos += w + return r, w +} + +func (i *inputReader) canCheckPrefix() bool { + return false +} + +func (i *inputReader) hasPrefix(re *Regexp) bool { + return false +} + +func (i *inputReader) index(re *Regexp, pos int) int { + return -1 +} + +func (i *inputReader) context(pos int) lazyFlag { + return 0 // not used +} + +// LiteralPrefix returns a literal string that must begin any match +// of the regular expression re. 
It returns the boolean true if the +// literal string comprises the entire regular expression. +func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { + return re.prefix, re.prefixComplete +} + +// MatchReader reports whether the text returned by the [io.RuneReader] +// contains any match of the regular expression re. +func (re *Regexp) MatchReader(r io.RuneReader) bool { + return re.doMatch(r, nil, "") +} + +// MatchString reports whether the string s +// contains any match of the regular expression re. +func (re *Regexp) MatchString(s string) bool { + return re.doMatch(nil, nil, s) +} + +// Match reports whether the byte slice b +// contains any match of the regular expression re. +func (re *Regexp) Match(b []byte) bool { + return re.doMatch(nil, b, "") +} + +// MatchReader reports whether the text returned by the RuneReader +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchReader(r), nil +} + +// MatchString reports whether the string s +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchString(pattern string, s string) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchString(s), nil +} + +// Match reports whether the byte slice b +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func Match(pattern string, b []byte) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.Match(b), nil +} + +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAllString(src, repl string) string { + n := 2 + if strings.Contains(repl, "$") { + n = 2 * (re.numSubexp + 1) + } + b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte { + return re.expand(dst, repl, nil, src, match) + }) + return string(b) +} + +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { + return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + })) +} + +// ReplaceAllStringFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched substring. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) 
+ }) + return string(b) +} + +func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte { + lastMatchEnd := 0 // end position of the most recent match + searchPos := 0 // position where we next look for a match + var buf []byte + var endPos int + if bsrc != nil { + endPos = len(bsrc) + } else { + endPos = len(src) + } + if nmatch > re.prog.NumCap { + nmatch = re.prog.NumCap + } + + var dstCap [2]int + for searchPos <= endPos { + a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0]) + if len(a) == 0 { + break // no more matches + } + + // Copy the unmatched characters before this match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:a[0]]...) + } else { + buf = append(buf, src[lastMatchEnd:a[0]]...) + } + + // Now insert a copy of the replacement string, but not for a + // match of the empty string immediately after another match. + // (Otherwise, we get double replacement for patterns that + // match both empty and nonempty strings.) + if a[1] > lastMatchEnd || a[0] == 0 { + buf = repl(buf, a) + } + lastMatchEnd = a[1] + + // Advance past this match; always advance at least one character. + var width int + if bsrc != nil { + _, width = utf8.DecodeRune(bsrc[searchPos:]) + } else { + _, width = utf8.DecodeRuneInString(src[searchPos:]) + } + if searchPos+width > a[1] { + searchPos += width + } else if searchPos+1 > a[1] { + // This clause is only needed at the end of the input + // string. In that case, DecodeRuneInString returns width=0. + searchPos++ + } else { + searchPos = a[1] + } + } + + // Copy the unmatched characters after the last match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:]...) + } else { + buf = append(buf, src[lastMatchEnd:]...) + } + + return buf +} + +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + n := 2 + if bytes.IndexByte(repl, '$') >= 0 { + n = 2 * (re.numSubexp + 1) + } + srepl := "" + b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte { + if len(srepl) != len(repl) { + srepl = string(repl) + } + return re.expand(dst, srepl, src, "", match) + }) + return b +} + +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] +// with the replacement bytes repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + }) +} + +// ReplaceAllFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched byte slice. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) + }) +} + +// Bitmap used by func special to check whether a character needs to be escaped. +var specialBytes [16]byte + +// special reports whether byte b needs to be escaped by QuoteMeta. 
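+// As an illustrative aside (an editor's addition, not upstream code):
+// QuoteMeta, defined below, consults this table one byte at a time, so
+//
+//	regexp.QuoteMeta("1+1=2") // `1\+1=2`; only '+' is a metacharacter here
+//
+// and the result is always safe to embed literally in a larger pattern.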
+func special(b byte) bool { + return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0 +} + +func init() { + for _, b := range []byte(`\.+*?()|[]{}^$`) { + specialBytes[b%16] |= 1 << (b / 16) + } +} + +// QuoteMeta returns a string that escapes all regular expression metacharacters +// inside the argument text; the returned string is a regular expression matching +// the literal text. +func QuoteMeta(s string) string { + // A byte loop is correct because all metacharacters are ASCII. + var i int + for i = 0; i < len(s); i++ { + if special(s[i]) { + break + } + } + // No meta characters found, so return original string. + if i >= len(s) { + return s + } + + b := make([]byte, 2*len(s)-i) + copy(b, s[:i]) + j := i + for ; i < len(s); i++ { + if special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + return string(b[:j]) +} + +// The number of capture values in the program may correspond +// to fewer capturing expressions than are in the regexp. +// For example, "(a){0}" turns into an empty program, so the +// maximum capture in the program is 0 but we need to return +// an expression for \1. Pad appends -1s to the slice a as needed. +func (re *Regexp) pad(a []int) []int { + if a == nil { + // No match. + return nil + } + n := (1 + re.numSubexp) * 2 + for len(a) < n { + a = append(a, -1) + } + return a +} + +// allMatches calls deliver at most n times +// with the location of successive matches in the input text. +// The input text is b if non-nil, otherwise s. +func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) { + var end int + if b == nil { + end = len(s) + } else { + end = len(b) + } + + for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; { + matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil) + if len(matches) == 0 { + break + } + + accept := true + if matches[1] == pos { + // We've found an empty match. + if matches[0] == prevMatchEnd { + // We don't allow an empty match right + // after a previous match, so ignore it. + accept = false + } + var width int + if b == nil { + is := inputString{str: s} + _, width = is.step(pos) + } else { + ib := inputBytes{str: b} + _, width = ib.step(pos) + } + if width > 0 { + pos += width + } else { + pos = end + 1 + } + } else { + pos = matches[1] + } + prevMatchEnd = matches[1] + + if accept { + deliver(re.pad(matches)) + i++ + } + } +} + +// Find returns a slice holding the text of the leftmost match in b of the regular expression. +// A return value of nil indicates no match. +func (re *Regexp) Find(b []byte) []byte { + var dstCap [2]int + a := re.doExecute(nil, b, "", 0, 2, dstCap[:0]) + if a == nil { + return nil + } + return b[a[0]:a[1]:a[1]] +} + +// FindIndex returns a two-element slice of integers defining the location of +// the leftmost match in b of the regular expression. The match itself is at +// b[loc[0]:loc[1]]. +// A return value of nil indicates no match. +func (re *Regexp) FindIndex(b []byte) (loc []int) { + a := re.doExecute(nil, b, "", 0, 2, nil) + if a == nil { + return nil + } + return a[0:2] +} + +// FindString returns a string holding the text of the leftmost match in s of the regular +// expression. If there is no match, the return value is an empty string, +// but it will also be empty if the regular expression successfully matches +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is +// necessary to distinguish these cases. 
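+//
+// For illustration (an editor's sketch, not upstream code):
+//
+//	re := regexp.MustCompile(`a*`)
+//	re.FindString("bbb")      // "": the leftmost match is empty
+//	re.FindStringIndex("bbb") // [0 0]: non-nil, so an empty match occurred
+//	regexp.MustCompile(`x`).FindStringIndex("bbb") // nil: no match at all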
+func (re *Regexp) FindString(s string) string {
+	var dstCap [2]int
+	a := re.doExecute(nil, nil, s, 0, 2, dstCap[:0])
+	if a == nil {
+		return ""
+	}
+	return s[a[0]:a[1]]
+}
+
+// FindStringIndex returns a two-element slice of integers defining the
+// location of the leftmost match in s of the regular expression. The match
+// itself is at s[loc[0]:loc[1]].
+// A return value of nil indicates no match.
+func (re *Regexp) FindStringIndex(s string) (loc []int) {
+	a := re.doExecute(nil, nil, s, 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindReaderIndex returns a two-element slice of integers defining the
+// location of the leftmost match of the regular expression in text read from
+// the [io.RuneReader]. The match text was found in the input stream at
+// byte offset loc[0] through loc[1]-1.
+// A return value of nil indicates no match.
+func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
+	a := re.doExecute(r, nil, "", 0, 2, nil)
+	if a == nil {
+		return nil
+	}
+	return a[0:2]
+}
+
+// FindSubmatch returns a slice of slices holding the text of the leftmost
+// match of the regular expression in b and the matches, if any, of its
+// subexpressions, as defined by the 'Submatch' descriptions in the package
+// comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindSubmatch(b []byte) [][]byte {
+	var dstCap [4]int
+	a := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0])
+	if a == nil {
+		return nil
+	}
+	ret := make([][]byte, 1+re.numSubexp)
+	for i := range ret {
+		if 2*i < len(a) && a[2*i] >= 0 {
+			ret[i] = b[a[2*i]:a[2*i+1]:a[2*i+1]]
+		}
+	}
+	return ret
+}
+
+// Expand appends template to dst and returns the result; during the
+// append, Expand replaces variables in the template with corresponding
+// matches drawn from src. The match slice should have been returned by
+// [Regexp.FindSubmatchIndex].
+//
+// In the template, a variable is denoted by a substring of the form
+// $name or ${name}, where name is a non-empty sequence of letters,
+// digits, and underscores. A purely numeric name like $1 refers to
+// the submatch with the corresponding index; other names refer to
+// capturing parentheses named with the (?P<name>...) syntax. A
+// reference to an out of range or unmatched index or a name that is not
+// present in the regular expression is replaced with an empty slice.
+//
+// In the $name form, name is taken to be as long as possible: $1x is
+// equivalent to ${1x}, not ${1}x, and $10 is equivalent to ${10}, not ${1}0.
+//
+// To insert a literal $ in the output, use $$ in the template.
+func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+	return re.expand(dst, string(template), src, "", match)
+}
+
+// ExpandString is like [Regexp.Expand] but the template and source are strings.
+// It appends to and returns a byte slice in order to give the calling
+// code control over allocation.
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
+	return re.expand(dst, template, nil, src, match)
+}
+
+func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
+	for len(template) > 0 {
+		before, after, ok := strings.Cut(template, "$")
+		if !ok {
+			break
+		}
+		dst = append(dst, before...)
+		template = after
+		if template != "" && template[0] == '$' {
+			// Treat $$ as $.
+ dst = append(dst, '$') + template = template[1:] + continue + } + name, num, rest, ok := extract(template) + if !ok { + // Malformed; treat $ as raw text. + dst = append(dst, '$') + continue + } + template = rest + if num >= 0 { + if 2*num+1 < len(match) && match[2*num] >= 0 { + if bsrc != nil { + dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...) + } else { + dst = append(dst, src[match[2*num]:match[2*num+1]]...) + } + } + } else { + for i, namei := range re.subexpNames { + if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 { + if bsrc != nil { + dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...) + } else { + dst = append(dst, src[match[2*i]:match[2*i+1]]...) + } + break + } + } + } + } + dst = append(dst, template...) + return dst +} + +// extract returns the name from a leading "name" or "{name}" in str. +// (The $ has already been removed by the caller.) +// If it is a number, extract returns num set to that number; otherwise num = -1. +func extract(str string) (name string, num int, rest string, ok bool) { + if str == "" { + return + } + brace := false + if str[0] == '{' { + brace = true + str = str[1:] + } + i := 0 + for i < len(str) { + rune, size := utf8.DecodeRuneInString(str[i:]) + if !unicode.IsLetter(rune) && !unicode.IsDigit(rune) && rune != '_' { + break + } + i += size + } + if i == 0 { + // empty name is not okay + return + } + name = str[:i] + if brace { + if i >= len(str) || str[i] != '}' { + // missing closing brace + return + } + i++ + } + + // Parse number. + num = 0 + for i := 0; i < len(name); i++ { + if name[i] < '0' || '9' < name[i] || num >= 1e8 { + num = -1 + break + } + num = num*10 + int(name[i]) - '0' + } + // Disallow leading zeros. + if name[0] == '0' && len(name) > 1 { + num = -1 + } + + rest = str[i:] + ok = true + return +} + +// FindSubmatchIndex returns a slice holding the index pairs identifying the +// leftmost match of the regular expression in b and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindSubmatchIndex(b []byte) []int { + return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil)) +} + +// FindStringSubmatch returns a slice of strings holding the text of the +// leftmost match of the regular expression in s and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' description in the +// package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindStringSubmatch(s string) []string { + var dstCap [4]int + a := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0]) + if a == nil { + return nil + } + ret := make([]string, 1+re.numSubexp) + for i := range ret { + if 2*i < len(a) && a[2*i] >= 0 { + ret[i] = s[a[2*i]:a[2*i+1]] + } + } + return ret +} + +// FindStringSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression in s and the +// matches, if any, of its subexpressions, as defined by the 'Submatch' and +// 'Index' descriptions in the package comment. +// A return value of nil indicates no match. 
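+//
+// A typical pairing with ExpandString (an editor's sketch, not upstream
+// code; the pattern and input are only illustrative):
+//
+//	re := regexp.MustCompile(`(?P<key>\w+)=(?P<val>\w+)`)
+//	m := re.FindStringSubmatchIndex("user=alice")
+//	out := re.ExpandString(nil, "$key is $val", "user=alice", m)
+//	// string(out) == "user is alice"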
+func (re *Regexp) FindStringSubmatchIndex(s string) []int { + return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil)) +} + +// FindReaderSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression of text read by +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined +// by the 'Submatch' and 'Index' descriptions in the package comment. A +// return value of nil indicates no match. +func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { + return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil)) +} + +const startSize = 10 // The size at which to start a slice in the 'All' routines. + +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive +// matches of the expression, as defined by the 'All' description in the +// package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAll(b []byte, n int) [][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]byte, 0, startSize) + } + result = append(result, b[match[0]:match[1]:match[1]]) + }) + return result +} + +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllString(s string, n int) []string { + if n < 0 { + n = len(s) + 1 + } + var result []string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([]string, 0, startSize) + } + result = append(result, s[match[0]:match[1]]) + }) + return result +} + +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a +// slice of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice +// of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. 
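+//
+// For example (an editor's sketch, not upstream code):
+//
+//	re := regexp.MustCompile(`(\d+)-(\d+)`)
+//	re.FindAllSubmatch([]byte("1-2 3-4"), -1)
+//	// [["1-2" "1" "2"] ["3-4" "3" "4"]]; n < 0 requests all matches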
+func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][][]byte, 0, startSize) + } + slice := make([][]byte, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = b[match[2*j]:match[2*j+1]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns +// a slice of all successive matches of the expression, as defined by the +// 'All' description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it +// returns a slice of all successive matches of the expression, as defined by +// the 'All' description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + if n < 0 { + n = len(s) + 1 + } + var result [][]string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]string, 0, startSize) + } + slice := make([]string, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = s[match[2*j]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllStringSubmatchIndex is the 'All' version of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of +// the expression, as defined by the 'All' description in the package +// comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// Split slices s into substrings separated by the expression and returns a slice of +// the substrings between those expression matches. +// +// The slice returned by this method consists of all the substrings of s +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. +// +// Example: +// +// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) +// // s: ["", "b", "b", "c", "cadaaae"] +// +// The count determines the number of substrings to return: +// +// n > 0: at most n substrings; the last substring will be the unsplit remainder. 
+// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +func (re *Regexp) Split(s string, n int) []string { + + if n == 0 { + return nil + } + + if len(re.expr) > 0 && len(s) == 0 { + return []string{""} + } + + matches := re.FindAllStringIndex(s, n) + strings := make([]string, 0, len(matches)) + + beg := 0 + end := 0 + for _, match := range matches { + if n > 0 && len(strings) >= n-1 { + break + } + + end = match[0] + if match[1] != 0 { + strings = append(strings, s[beg:end]) + } + beg = match[1] + } + + if end != len(s) { + strings = append(strings, s[beg:]) + } + + return strings +} + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md index 3b43b033e..76f49bacc 100644 --- a/vendor/github.com/mattn/go-sqlite3/README.md +++ b/vendor/github.com/mattn/go-sqlite3/README.md @@ -351,6 +351,8 @@ For example the TDM-GCC Toolchain can be found [here](https://jmeubank.github.io # User Authentication +***This is deprecated*** + This package supports the SQLite User Authentication module. ## Compile diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index e9cca66cf..78b6f5b8a 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,7 +1,7 @@ #ifndef USE_LIBSQLITE3 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.49.1. By combining all the individual C code files into this +** version 3.50.4. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -19,7 +19,7 @@ ** separate file. This file contains only code for the core SQLite library. ** ** The content in this amalgamation comes from Fossil check-in -** 873d4e274b4988d260ba8354a9718324a1c2 with changes in files: +** 4d8adfb30e03f9cf27f800a2c1ba3c48fb4c with changes in files: ** ** */ @@ -453,7 +453,7 @@ extern "C" { ** ** Since [version 3.6.18] ([dateof:3.6.18]), ** SQLite source code has been stored in the -** Fossil configuration management +** Fossil configuration management ** system. ^The SQLITE_SOURCE_ID macro evaluates to ** a string which identifies a particular check-in of SQLite ** within its configuration management system. ^The SQLITE_SOURCE_ID @@ -466,9 +466,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.49.1" -#define SQLITE_VERSION_NUMBER 3049001 -#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" +#define SQLITE_VERSION "3.50.4" +#define SQLITE_VERSION_NUMBER 3050004 +#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1483,6 +1483,12 @@ struct sqlite3_io_methods { ** the value that M is to be set to. Before returning, the 32-bit signed ** integer is overwritten with the previous value of M. ** +**
  • [[SQLITE_FCNTL_BLOCK_ON_CONNECT]] +** The [SQLITE_FCNTL_BLOCK_ON_CONNECT] opcode is used to configure the +** VFS to block when taking a SHARED lock to connect to a wal mode database. +** This is used to implement the functionality associated with +** SQLITE_SETLK_BLOCK_ON_CONNECT. +** **
  • [[SQLITE_FCNTL_DATA_VERSION]] ** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to ** a database file. The argument is a pointer to a 32-bit unsigned integer. @@ -1579,6 +1585,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_CKSM_FILE 41 #define SQLITE_FCNTL_RESET_CACHE 42 #define SQLITE_FCNTL_NULL_IO 43 +#define SQLITE_FCNTL_BLOCK_ON_CONNECT 44 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2309,13 +2316,16 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_CONFIG_LOOKASIDE]]
    SQLITE_CONFIG_LOOKASIDE
    **
    ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine -** the default size of lookaside memory on each [database connection]. +** the default size of [lookaside memory] on each [database connection]. ** The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE -** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** option to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
    +** size of each lookaside buffer slot ("sz") and the second is the number of +** slots allocated to each database connection ("cnt").)^ +** ^(SQLITE_CONFIG_LOOKASIDE sets the default lookaside size. +** The [SQLITE_DBCONFIG_LOOKASIDE] option to [sqlite3_db_config()] can +** be used to change the lookaside configuration on individual connections.)^ +** The [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to change the +** default lookaside configuration at compile-time. +** ** ** [[SQLITE_CONFIG_PCACHE2]]
    SQLITE_CONFIG_PCACHE2
    **
    ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is @@ -2552,31 +2562,50 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
    SQLITE_DBCONFIG_LOOKASIDE
    **
    The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the -** configuration of the lookaside memory allocator within a database +** configuration of the [lookaside memory allocator] within a database ** connection. ** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not ** in the [DBCONFIG arguments|usual format]. ** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, ** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE ** should have a total of five parameters. -** ^The first argument (the third parameter to [sqlite3_db_config()] is a +**
      +**
    1. The first argument ("buf") is a ** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory +** The first argument may be NULL in which case SQLite will allocate the +** lookaside buffer itself using [sqlite3_malloc()]. +**

    2. The second argument ("sz") is the +** size of each lookaside buffer slot. Lookaside is disabled if "sz" +** is less than 8. The "sz" argument should be a multiple of 8 less than +** 65536. If "sz" does not meet this constraint, it is reduced in size until +** it does. +**

3. The third argument ("cnt") is the number of slots. Lookaside is disabled
+** if "cnt" is less than 1. The "cnt" value will be reduced, if necessary, so
+** that the product of "sz" and "cnt" does not exceed 2,147,418,112. The "cnt"
+** parameter is usually chosen so that the product of "sz" and "cnt" is less
+** than 1,000,000.
+**

    +**

    If the "buf" argument is not NULL, then it must +** point to a memory buffer with a size that is greater than +** or equal to the product of "sz" and "cnt". +** The buffer must be aligned to an 8-byte boundary. +** The lookaside memory ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. +** when the value returned by [SQLITE_DBSTATUS_LOOKASIDE_USED] is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^

    +** [SQLITE_BUSY]. +** If the "buf" argument is NULL and an attempt +** to allocate memory based on "sz" and "cnt" fails, then +** lookaside is silently disabled. +**

    +** The [SQLITE_CONFIG_LOOKASIDE] configuration option can be used to set the +** default lookaside configuration at initialization. The +** [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to set the default lookaside +** configuration at compile-time. Typical values for lookaside are 1200 for +** "sz" and 40 to 100 for "cnt". +** ** ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **

    SQLITE_DBCONFIG_ENABLE_FKEY
@@ -3313,6 +3342,44 @@ SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
 */
 SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
 
+/*
+** CAPI3REF: Set the Setlk Timeout
+** METHOD: sqlite3
+**
+** This routine is only useful in SQLITE_ENABLE_SETLK_TIMEOUT builds. If
+** the VFS supports blocking locks, it sets the timeout in ms used by
+** eligible locks taken on wal mode databases by the specified database
+** handle. In non-SQLITE_ENABLE_SETLK_TIMEOUT builds, or if the VFS does
+** not support blocking locks, this function is a no-op.
+**
+** Passing 0 to this function disables blocking locks altogether. Passing
+** -1 to this function requests that the VFS blocks for a long time -
+** indefinitely if possible. The results of passing any other negative value
+** are undefined.
+**
+** Internally, each SQLite database handle stores two timeout values - the
+** busy-timeout (used for rollback mode databases, or if the VFS does not
+** support blocking locks) and the setlk-timeout (used for blocking locks
+** on wal-mode databases). The sqlite3_busy_timeout() method sets both
+** values; this function sets only the setlk-timeout value. Therefore,
+** to configure separate busy-timeout and setlk-timeout values for a single
+** database handle, call sqlite3_busy_timeout() followed by this function.
+**
+** Whenever the number of connections to a wal mode database falls from
+** 1 to 0, the last connection takes an exclusive lock on the database,
+** then checkpoints and deletes the wal file. While it is doing this, any
+** new connection that tries to read from the database fails with an
+** SQLITE_BUSY error. Or, if the SQLITE_SETLK_BLOCK_ON_CONNECT flag is
+** passed to this API, the new connection blocks until the exclusive lock
+** has been released.
+*/
+SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags);
+
+/*
+** CAPI3REF: Flags for sqlite3_setlk_timeout()
+*/
+#define SQLITE_SETLK_BLOCK_ON_CONNECT 0x01
+
 /*
 ** CAPI3REF: Convenience Routines For Running Queries
 ** METHOD: sqlite3
@@ -4332,7 +4399,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
 **
 ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
 ** database filename D with corresponding journal file J and WAL file W and
-** with N URI parameters key/values pairs in the array P. The result from
+** an array P of N URI Key/Value pairs. The result from
 ** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that
 ** is safe to pass to routines like:
 **
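Note: sqlite3_setlk_timeout() added above is a C-level API, and nothing in this diff wraps it in the vendored Go driver. As a hedged sketch of the related knobs the driver does document (the _busy_timeout and _journal_mode DSN parameters; the database file name here is illustrative):

	package main

	import (
		"database/sql"
		"log"

		_ "github.com/mattn/go-sqlite3"
	)

	func main() {
		// WAL mode with a 5-second busy timeout: lock contention is retried
		// for up to 5000ms before SQLITE_BUSY is surfaced to the caller.
		db, err := sql.Open("sqlite3", "file:app.db?_busy_timeout=5000&_journal_mode=WAL")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		if err := db.Ping(); err != nil {
			log.Fatal(err)
		}
	}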
      @@ -5013,7 +5080,7 @@ typedef struct sqlite3_context sqlite3_context; ** METHOD: sqlite3_stmt ** ** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following +** literals may be replaced by a [parameter] that matches one of the following ** templates: ** **
        @@ -5058,7 +5125,7 @@ typedef struct sqlite3_context sqlite3_context; ** ** [[byte-order determination rules]] ^The byte-order of ** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF) -** found in first character, which is removed, or in the absence of a BOM +** found in the first character, which is removed, or in the absence of a BOM ** the byte order is the native byte order of the host ** machine for sqlite3_bind_text16() or the byte order specified in ** the 6th parameter for sqlite3_bind_text64().)^ @@ -5078,7 +5145,7 @@ typedef struct sqlite3_context sqlite3_context; ** or sqlite3_bind_text16() or sqlite3_bind_text64() then ** that parameter must be the byte offset ** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occurs at byte offsets less than +** terminated. If any NUL characters occur at byte offsets less than ** the value of the fourth parameter then the resulting string value will ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. @@ -5290,7 +5357,7 @@ SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); ** METHOD: sqlite3_stmt ** ** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in +** table column that is the origin of a particular result column in a ** [SELECT] statement. ** ^The name of the database or table or column can be returned as ** either a UTF-8 or UTF-16 string. ^The _database_ routines return @@ -5428,7 +5495,7 @@ SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); ** other than [SQLITE_ROW] before any subsequent invocation of ** sqlite3_step(). Failure to reset the prepared statement using ** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1], +** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]), ** sqlite3_step() began ** calling [sqlite3_reset()] automatically in this circumstance rather ** than returning [SQLITE_MISUSE]. This is not considered a compatibility @@ -5859,8 +5926,8 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be -** used inside of triggers, view, CHECK constraints, or other elements of -** the database schema. This flags is especially recommended for SQL +** used inside of triggers, views, CHECK constraints, or other elements of +** the database schema. This flag is especially recommended for SQL ** functions that have side effects or reveal internal application state. ** Without this flag, an attacker might be able to modify the schema of ** a database file to include invocations of the function with parameters @@ -5891,7 +5958,7 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** [user-defined window functions|available here]. ** ** ^(If the final parameter to sqlite3_create_function_v2() or -** sqlite3_create_window_function() is not NULL, then it is destructor for +** sqlite3_create_window_function() is not NULL, then it is the destructor for ** the application data pointer. 
The destructor is invoked when the function ** is deleted, either by being overloaded or when the database connection ** closes.)^ ^The destructor is also invoked if the call to @@ -6291,7 +6358,7 @@ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); ** METHOD: sqlite3_value ** ** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] -** object D and returns a pointer to that copy. ^The [sqlite3_value] returned +** object V and returns a pointer to that copy. ^The [sqlite3_value] returned ** is a [protected sqlite3_value] object even if the input is not. ** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a ** memory allocation fails. ^If V is a [pointer value], then the result @@ -6329,7 +6396,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. Changing the +** determined by the N parameter on the first successful call. Changing the ** value of N in any subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory ** allocation.)^ Within the xFinal callback, it is customary to set @@ -6491,7 +6558,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** ** Security Warning: These interfaces should not be exposed in scripting ** languages or in other circumstances where it might be possible for an -** an attacker to invoke them. Any agent that can invoke these interfaces +** attacker to invoke them. Any agent that can invoke these interfaces ** can probably also take control of the process. ** ** Database connection client data is only available for SQLite @@ -6605,7 +6672,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur +** appear if the string were NUL terminated. If any NUL characters occur ** in the string at a byte offset that is less than the value of the 3rd ** parameter, then the resulting string will contain embedded NULs and the ** result of expressions operating on strings with embedded NULs is undefined. @@ -6663,7 +6730,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** string and preferably a string literal. The sqlite3_result_pointer() ** routine is part of the [pointer passing interface] added for SQLite 3.20.0. ** -** If these routines are called from within the different thread +** If these routines are called from within a different thread ** than the one containing the application-defined function that received ** the [sqlite3_context] pointer, the results are undefined. */ @@ -7069,7 +7136,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name -** for the N-th database on database connection D, or a NULL pointer of N is +** for the N-th database on database connection D, or a NULL pointer if N is ** out of range. An N value of 0 means the main database file. An N of 1 is ** the "temp" schema. Larger values of N correspond to various ATTACH-ed ** databases. @@ -7164,7 +7231,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_READ state means that the database is currently ** in a read transaction. Content has been read from the database file ** but nothing in the database file has changed. The transaction state -** will advanced to SQLITE_TXN_WRITE if any changes occur and there are +** will be advanced to SQLITE_TXN_WRITE if any changes occur and there are ** no other conflicting concurrent write transactions. The transaction ** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or ** [COMMIT].
        @@ -7173,7 +7240,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_WRITE state means that the database is currently ** in a write transaction. Content has been written to the database file ** but has not yet committed. The transaction state will change to -** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
        +** SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT]. */ #define SQLITE_TXN_NONE 0 #define SQLITE_TXN_READ 1 @@ -7324,6 +7391,8 @@ SQLITE_API int sqlite3_autovacuum_pages( ** ** ^The second argument is a pointer to the function to invoke when a ** row is updated, inserted or deleted in a rowid table. +** ^The update hook is disabled by invoking sqlite3_update_hook() +** with a NULL pointer as the second parameter. ** ^The first argument to the callback is a copy of the third argument ** to sqlite3_update_hook(). ** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], @@ -7452,7 +7521,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** CAPI3REF: Impose A Limit On Heap Size ** ** These interfaces impose limits on the amount of heap memory that will be -** by all database connections within a single process. +** used by all database connections within a single process. ** ** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the ** soft limit on the amount of heap memory that may be allocated by SQLite. @@ -7510,7 +7579,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); **
      )^ ** ** The circumstances under which SQLite will enforce the heap limits may -** changes in future releases of SQLite. +** change in future releases of SQLite. */ SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N); @@ -7625,8 +7694,8 @@ SQLITE_API int sqlite3_table_column_metadata( ** ^The entry point is zProc. ** ^(zProc may be 0, in which case SQLite will try to come up with an ** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic +** If that does not work, it constructs a name "sqlite3_X_init" where +** X consists of the lower-case equivalent of all ASCII alphabetic ** characters in the filename from the last "/" to the first following ** "." and omitting any initial "lib".)^ ** ^The sqlite3_load_extension() interface returns @@ -7697,7 +7766,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three ** arguments and expects an integer result as if the signature of the -** entry point where as follows: +** entry point were as follows: ** **
       **    int xEntryPoint(
      @@ -7861,7 +7930,7 @@ struct sqlite3_module {
       ** virtual table and might not be checked again by the byte code.)^ ^(The
       ** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
       ** is left in its default setting of false, the constraint will always be
      -** checked separately in byte code.  If the omit flag is change to true, then
      +** checked separately in byte code.  If the omit flag is changed to true, then
       ** the constraint may or may not be checked in byte code.  In other words,
       ** when the omit flag is true there is no guarantee that the constraint will
       ** not be checked again using byte code.)^
      @@ -7887,7 +7956,7 @@ struct sqlite3_module {
       ** The xBestIndex method may optionally populate the idxFlags field with a
       ** mask of SQLITE_INDEX_SCAN_* flags. One such flag is
       ** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN]
      -** output to show the idxNum has hex instead of as decimal.  Another flag is
      +** output to show the idxNum as hex instead of as decimal.  Another flag is
       ** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will
       ** return at most one row.
       **
      @@ -8028,7 +8097,7 @@ struct sqlite3_index_info {
       ** the implementation of the [virtual table module].   ^The fourth
       ** parameter is an arbitrary client data pointer that is passed through
       ** into the [xCreate] and [xConnect] methods of the virtual table module
      -** when a new virtual table is be being created or reinitialized.
      +** when a new virtual table is being created or reinitialized.
       **
       ** ^The sqlite3_create_module_v2() interface has a fifth parameter which
       ** is a pointer to a destructor for the pClientData.  ^SQLite will
      @@ -8193,7 +8262,7 @@ typedef struct sqlite3_blob sqlite3_blob;
       ** in *ppBlob. Otherwise an [error code] is returned and, unless the error
       ** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
       ** the API is not misused, it is always safe to call [sqlite3_blob_close()]
      -** on *ppBlob after this function it returns.
      +** on *ppBlob after this function returns.
       **
       ** This function fails with SQLITE_ERROR if any of the following are true:
       ** 
        @@ -8313,7 +8382,7 @@ SQLITE_API int sqlite3_blob_close(sqlite3_blob *); ** ** ^Returns the size in bytes of the BLOB accessible via the ** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing +** incremental blob I/O routines can only read or overwrite existing ** blob content; they cannot change the size of a blob. ** ** This routine only works on a [BLOB handle] which has been created @@ -8463,7 +8532,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ^The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() ** routine returns NULL if it is unable to allocate the requested -** mutex. The argument to sqlite3_mutex_alloc() must one of these +** mutex. The argument to sqlite3_mutex_alloc() must be one of these ** integer constants: ** **
@@ -8696,7 +8765,7 @@ SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
 ** CAPI3REF: Retrieve the mutex for a database connection
 ** METHOD: sqlite3
 **
-** ^This interface returns a pointer the [sqlite3_mutex] object that
+** ^This interface returns a pointer to the [sqlite3_mutex] object that
 ** serializes access to the [database connection] given in the argument
 ** when the [threading mode] is Serialized.
 ** ^If the [threading mode] is Single-thread or Multi-thread then this
@@ -8819,7 +8888,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
 ** CAPI3REF: SQL Keyword Checking
 **
 ** These routines provide access to the set of SQL language keywords
-** recognized by SQLite. Applications can uses these routines to determine
+** recognized by SQLite. Applications can use these routines to determine
 ** whether or not a specific identifier needs to be escaped (for example,
 ** by enclosing in double-quotes) so as not to confuse the parser.
 **
@@ -8987,7 +9056,7 @@ SQLITE_API void sqlite3_str_reset(sqlite3_str*);
 ** content of the dynamic string under construction in X. The value
 ** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X
 ** and might be freed or altered by any subsequent method on the same
-** [sqlite3_str] object. Applications must not used the pointer returned
+** [sqlite3_str] object. Applications must not use the pointer returned by
 ** [sqlite3_str_value(X)] after any subsequent method call on the same
 ** object. ^Applications may change the content of the string returned
 ** by [sqlite3_str_value(X)] as long as they do not write into any bytes
@@ -9073,7 +9142,7 @@ SQLITE_API int sqlite3_status64(
 ** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
 ** buffer and where forced to overflow to [sqlite3_malloc()]. The
 ** returned value includes allocations that overflowed because they
-** where too large (they were larger than the "sz" parameter to
+** were too large (they were larger than the "sz" parameter to
 ** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
 ** no space was left in the page cache.)^
 **
@@ -9157,28 +9226,29 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r
 ** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_HIT</dt>
 ** <dd>This parameter returns the number of malloc attempts that were
 ** satisfied using lookaside memory. Only the high-water value is meaningful;
-** the current value is always zero.)^
+** the current value is always zero.</dd>)^
 **
 ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]]
 ** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE</dt>
-** <dd>This parameter returns the number malloc attempts that might have
+** <dd>This parameter returns the number of malloc attempts that might have
 ** been satisfied using lookaside memory but failed due to the amount of
 ** memory requested being larger than the lookaside slot size.
 ** Only the high-water value is meaningful;
-** the current value is always zero.)^
+** the current value is always zero.</dd>)^
 **
 ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
 ** ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL</dt>
-** <dd>This parameter returns the number malloc attempts that might have
+** <dd>This parameter returns the number of malloc attempts that might have
 ** been satisfied using lookaside memory but failed due to all lookaside
 ** memory already being in use.
 ** Only the high-water value is meaningful;
-** the current value is always zero.)^
+** the current value is always zero.</dd>)^
 **
 ** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
 ** <dd>This parameter returns the approximate number of bytes of heap
 ** memory used by all pager caches associated with the database connection.)^
 ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
+** </dd>
 **
 ** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
 ** ^(<dt>SQLITE_DBSTATUS_CACHE_USED_SHARED</dt>
@@ -9187,10 +9257,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r
 ** memory used by that pager cache is divided evenly between the attached
 ** connections.)^ In other words, if none of the pager caches associated
 ** with the database connection are shared, this request returns the same
-** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are
+** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are
 ** shared, the value returned by this call will be smaller than that returned
 ** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
-** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. 
+** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
 **
 ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
 ** <dd>This parameter returns the approximate number of bytes of heap
@@ -9200,6 +9270,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r
 ** schema memory is shared with other database connections due to
 ** [shared cache mode] being enabled.
 ** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
+** </dd>
 **
 ** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
 ** <dd>This parameter returns the approximate number of bytes of heap
@@ -9236,7 +9307,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r
 ** been written to disk in the middle of a transaction due to the page
 ** cache overflowing. Transactions are more efficient if they are written
 ** to disk all at once. When pages spill mid-transaction, that introduces
-** additional overhead. This parameter can be used help identify
+** additional overhead. This parameter can be used to help identify
 ** inefficiencies that can be resolved by increasing the cache size.
 ** </dd>
 **
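
The lookaside counters touched up above are read through sqlite3_db_status(). A small illustration, not part of the patch; note that for these parameters only the high-water values carry information.

```c
#include <stdio.h>
#include "sqlite3.h"

void print_lookaside_stats(sqlite3 *db){
  int cur = 0, hiwtr = 0;
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT,
                        &cur, &hiwtr, 0)==SQLITE_OK ){
    printf("lookaside hits: %d\n", hiwtr);          /* cur is always 0 */
  }
  if( sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                        &cur, &hiwtr, 0)==SQLITE_OK ){
    printf("misses (slot too small): %d\n", hiwtr);
  }
}
```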
@@ -9307,13 +9378,13 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
 ** [[SQLITE_STMTSTATUS_SORT]] <dt>SQLITE_STMTSTATUS_SORT</dt>
 ** <dd>^This is the number of sort operations that have occurred.
 ** A non-zero value in this counter may indicate an opportunity to
-** improvement performance through careful use of indices.</dd>
+** improve performance through careful use of indices.</dd>
 **
 ** [[SQLITE_STMTSTATUS_AUTOINDEX]] <dt>SQLITE_STMTSTATUS_AUTOINDEX</dt>
 ** <dd>^This is the number of rows inserted into transient indices that
 ** were created automatically in order to help joins run faster.
 ** A non-zero value in this counter may indicate an opportunity to
-** improvement performance by adding permanent indices that do not
+** improve performance by adding permanent indices that do not
 ** need to be reinitialized each time the statement is run.</dd>
 **
 ** [[SQLITE_STMTSTATUS_VM_STEP]] <dt>SQLITE_STMTSTATUS_VM_STEP</dt>
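
The two counters whose descriptions are corrected above are commonly checked together to spot missing indexes. An illustrative helper, not taken from the patch:

```c
#include <stdio.h>
#include "sqlite3.h"

void report_index_opportunities(sqlite3_stmt *pStmt){
  int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
  int nAuto = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_AUTOINDEX, 0);
  if( nSort>0 ) printf("statement sorted %d time(s)\n", nSort);
  if( nAuto>0 ) printf("%d row(s) copied into a transient index\n", nAuto);
}
```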
@@ -9322,19 +9393,19 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
 ** to 2147483647. The number of virtual machine operations can be
 ** used as a proxy for the total work done by the prepared statement.
 ** If the number of virtual machine operations exceeds 2147483647
-** then the value returned by this statement status code is undefined. 
+** then the value returned by this statement status code is undefined.
 **
 ** [[SQLITE_STMTSTATUS_REPREPARE]] <dt>SQLITE_STMTSTATUS_REPREPARE</dt>
 ** <dd>^This is the number of times that the prepare statement has been
 ** automatically regenerated due to schema changes or changes to
-** [bound parameters] that might affect the query plan. 
+** [bound parameters] that might affect the query plan.</dd>
 **
 ** [[SQLITE_STMTSTATUS_RUN]] <dt>SQLITE_STMTSTATUS_RUN</dt>
 ** <dd>^This is the number of times that the prepared statement has
 ** been run. A single "run" for the purposes of this counter is one
 ** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()].
 ** The counter is incremented on the first [sqlite3_step()] call of each
-** cycle. 
+** cycle.</dd>
 **
 ** [[SQLITE_STMTSTATUS_FILTER_MISS]]
 ** [[SQLITE_STMTSTATUS_FILTER HIT]]
@@ -9344,7 +9415,7 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
 ** step was bypassed because a Bloom filter returned not-found. The
 ** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of
 ** times that the Bloom filter returned a find, and thus the join step
-** had to be processed as normal. 
+** had to be processed as normal.
 **
 ** [[SQLITE_STMTSTATUS_MEMUSED]] <dt>SQLITE_STMTSTATUS_MEMUSED</dt>
 ** <dd>^This is the approximate number of bytes of heap memory
@@ -9449,9 +9520,9 @@ struct sqlite3_pcache_page {
 ** SQLite will typically create one cache instance for each open database file,
 ** though this is not guaranteed. ^The
 ** first parameter, szPage, is the size in bytes of the pages that must
-** be allocated by the cache. ^szPage will always a power of two. ^The
+** be allocated by the cache. ^szPage will always be a power of two. ^The
 ** second parameter szExtra is a number of bytes of extra storage
-** associated with each page cache entry. ^The szExtra parameter will
+** associated with each page cache entry. ^The szExtra parameter will be
 ** a number less than 250. SQLite will use the
 ** extra szExtra bytes on each page to store metadata about the underlying
 ** database page on disk. The value passed into szExtra depends
@@ -9459,17 +9530,17 @@ struct sqlite3_pcache_page {
 ** ^The third argument to xCreate(), bPurgeable, is true if the cache being
 ** created will be used to cache database pages of a file stored on disk, or
 ** false if it is used for an in-memory database. The cache implementation
-** does not have to do anything special based with the value of bPurgeable;
+** does not have to do anything special based upon the value of bPurgeable;
 ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will
 ** never invoke xUnpin() except to deliberately delete a page.
 ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
 ** false will always have the "discard" flag set to true.
-** ^Hence, a cache created with bPurgeable false will
+** ^Hence, a cache created with bPurgeable set to false will
 ** never contain any unpinned pages.
 **
 ** [[the xCachesize() page cache method]]
 ** ^(The xCachesize() method may be called at any time by SQLite to set the
-** suggested maximum cache-size (number of pages stored by) the cache
+** suggested maximum cache-size (number of pages stored) for the cache
 ** instance passed as the first argument. This is the value configured using
 ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable
 ** parameter, the implementation is not required to do anything with this
@@ -9496,12 +9567,12 @@ struct sqlite3_pcache_page {
 ** implementation must return a pointer to the page buffer with its content
 ** intact. If the requested page is not already in the cache, then the
 ** cache implementation should use the value of the createFlag
-** parameter to help it determined what action to take:
+** parameter to help it determine what action to take:
 **
 ** <table border=1 width=85% align=center>
 ** <tr><th> createFlag <th> Behavior when page is not already in cache
 ** <tr><td> 0 <td> Do not allocate a new page. Return NULL.
-** <tr><td> 1 <td> Allocate a new page if it easy and convenient to do so.
+** <tr><td> 1 <td> Allocate a new page if it is easy and convenient to do so.
 **                 Otherwise return NULL.
 ** <tr><td> 2 <td> Make every effort to allocate a new page. Only return
 **                 NULL if allocating a new page is effectively impossible.
@@ -9518,7 +9589,7 @@ struct sqlite3_pcache_page {
 ** as its second argument. If the third parameter, discard, is non-zero,
 ** then the page must be evicted from the cache.
 ** ^If the discard parameter is
-** zero, then the page may be discarded or retained at the discretion of
+** zero, then the page may be discarded or retained at the discretion of the
 ** page cache implementation. ^The page cache implementation
 ** may choose to evict unpinned pages at any time.
 **
@@ -9536,7 +9607,7 @@ struct sqlite3_pcache_page {
 ** When SQLite calls the xTruncate() method, the cache must discard all
 ** existing cache entries with page numbers (keys) greater than or equal
 ** to the value of the iLimit parameter passed to xTruncate(). If any
-** of these pages are pinned, they are implicitly unpinned, meaning that
+** of these pages are pinned, they become implicitly unpinned, meaning that
 ** they can be safely discarded.
 **
 ** [[the xDestroy() page cache method]]
@@ -9716,7 +9787,7 @@ typedef struct sqlite3_backup sqlite3_backup;
 ** external process or via a database connection other than the one being
 ** used by the backup operation, then the backup will be automatically
 ** restarted by the next call to sqlite3_backup_step(). ^If the source
-** database is modified by the using the same database connection as is used
+** database is modified by using the same database connection as is used
 ** by the backup operation, then the backup database is automatically
 ** updated at the same time.
 **
@@ -9733,7 +9804,7 @@ typedef struct sqlite3_backup sqlite3_backup;
 ** and may not be used following a call to sqlite3_backup_finish().
 **
 ** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no
-** sqlite3_backup_step() errors occurred, regardless or whether or not
+** sqlite3_backup_step() errors occurred, regardless of whether or not
 ** sqlite3_backup_step() completed.
 ** ^If an out-of-memory condition or IO error occurred during any prior
 ** sqlite3_backup_step() call on the same [sqlite3_backup] object, then
@@ -9835,7 +9906,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
 ** application receives an SQLITE_LOCKED error, it may call the
 ** sqlite3_unlock_notify() method with the blocked connection handle as
 ** the first argument to register for a callback that will be invoked
-** when the blocking connections current transaction is concluded. ^The
+** when the blocking connection's current transaction is concluded. ^The
 ** callback is invoked from within the [sqlite3_step] or [sqlite3_close]
 ** call that concludes the blocking connection's transaction.
 **
@@ -9855,7 +9926,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
 ** blocked connection already has a registered unlock-notify callback,
 ** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is
 ** called with a NULL pointer as its second argument, then any existing
-** unlock-notify callback is canceled. ^The blocked connections
+** unlock-notify callback is canceled. ^The blocked connection's
 ** unlock-notify callback may also be canceled by closing the blocked
 ** connection using [sqlite3_close()].
 **
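
Several hunks above touch the online-backup commentary. For orientation, a compact backup loop in the style those comments describe; this is a sketch, not the canonical example from the SQLite documentation.

```c
#include "sqlite3.h"

int backup_db(sqlite3 *pDest, sqlite3 *pSrc){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
  if( p==0 ) return sqlite3_errcode(pDest);
  do{
    rc = sqlite3_backup_step(p, 64);   /* Copy up to 64 pages per step */
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(50);
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  /* finish() reports any prior step() error, whether or not it completed */
  return sqlite3_backup_finish(p);
}
```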
@@ -10253,7 +10324,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
 ** support constraints. In this configuration (which is the default) if
 ** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
 ** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
-** specified as part of the users SQL statement, regardless of the actual
+** specified as part of the user's SQL statement, regardless of the actual
 ** ON CONFLICT mode specified.
 **
 ** If X is non-zero, then the virtual table implementation guarantees
@@ -10287,7 +10358,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
 ** [[SQLITE_VTAB_INNOCUOUS]] <dt>SQLITE_VTAB_INNOCUOUS</dt>
 ** <dd>Calls of the form
 ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the
-** the [xConnect] or [xCreate] methods of a [virtual table] implementation
+** [xConnect] or [xCreate] methods of a [virtual table] implementation
 ** identify that virtual table as being safe to use from within triggers
 ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the
 ** virtual table can do no serious harm even if it is controlled by a
@@ -10455,7 +10526,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
 **
 **
 ** ^For the purposes of comparing virtual table output values to see if the
-** values are same value for sorting purposes, two NULL values are considered
+** values are the same value for sorting purposes, two NULL values are considered
 ** to be the same. In other words, the comparison operator is "IS"
 ** (or "IS NOT DISTINCT FROM") and not "==".
 **
@@ -10465,7 +10536,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
 **
 ** ^A virtual table implementation is always free to return rows in any order
 ** it wants, as long as the "orderByConsumed" flag is not set. ^When the
-** the "orderByConsumed" flag is unset, the query planner will add extra
+** "orderByConsumed" flag is unset, the query planner will add extra
 ** [bytecode] to ensure that the final results returned by the SQL query are
 ** ordered correctly. The use of the "orderByConsumed" flag and the
 ** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful
@@ -10562,7 +10633,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle);
 ** sqlite3_vtab_in_next(X,P) should be one of the parameters to the
 ** xFilter method which invokes these routines, and specifically
 ** a parameter that was previously selected for all-at-once IN constraint
-** processing use the [sqlite3_vtab_in()] interface in the
+** processing using the [sqlite3_vtab_in()] interface in the
 ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not
 ** an xFilter argument that was selected for all-at-once IN constraint
 ** processing, then these routines return [SQLITE_ERROR].)^
@@ -10617,7 +10688,7 @@ SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut);
 ** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V)
 ** inteface returns SQLITE_NOTFOUND if the right-hand side of the J-th
 ** constraint is not available. ^The sqlite3_vtab_rhs_value() interface
-** can return an result code other than SQLITE_OK or SQLITE_NOTFOUND if
+** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if
 ** something goes wrong.
 **
 ** The sqlite3_vtab_rhs_value() interface is usually only successful if
@@ -10645,8 +10716,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
 ** KEYWORDS: {conflict resolution mode}
 **
 ** These constants are returned by [sqlite3_vtab_on_conflict()] to
-** inform a [virtual table] implementation what the [ON CONFLICT] mode
-** is for the SQL statement being evaluated.
+** inform a [virtual table] implementation of the [ON CONFLICT] mode
+** for the SQL statement being evaluated.
 **
 ** Note that the [SQLITE_IGNORE] constant is also used as a potential
 ** return value from the [sqlite3_set_authorizer()] callback and that
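
The SQLITE_VTAB_CONSTRAINT_SUPPORT text corrected above is exercised from xCreate/xConnect. A hypothetical fragment for illustration only; demoConnect and the declared schema are invented names, not from the patch.

```c
#include <string.h>
#include "sqlite3.h"

static int demoConnect(
  sqlite3 *db, void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  int rc;
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a,b)");
  if( rc==SQLITE_OK ){
    /* Let xUpdate report SQLITE_CONSTRAINT without aborting the stmt */
    rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
  }
  if( rc==SQLITE_OK ){
    *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
    if( *ppVtab==0 ) return SQLITE_NOMEM;
    memset(*ppVtab, 0, sizeof(**ppVtab));
  }
  return rc;
}
```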
@@ -10686,39 +10757,39 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
 ** [[SQLITE_SCANSTAT_EST]] <dt>SQLITE_SCANSTAT_EST</dt>
 ** <dd>^The "double" variable pointed to by the V parameter will be set to the
 ** query planner's estimate for the average number of rows output from each
-** iteration of the X-th loop. If the query planner's estimates was accurate,
+** iteration of the X-th loop. If the query planner's estimate was accurate,
 ** then this value will approximate the quotient NVISIT/NLOOP and the
 ** product of this value for all prior loops with the same SELECTID will
-** be the NLOOP value for the current loop.
+** be the NLOOP value for the current loop.</dd>
 **
 ** [[SQLITE_SCANSTAT_NAME]] <dt>SQLITE_SCANSTAT_NAME</dt>
 ** <dd>^The "const char *" variable pointed to by the V parameter will be set
 ** to a zero-terminated UTF-8 string containing the name of the index or table
-** used for the X-th loop.
+** used for the X-th loop.</dd>
 **
 ** [[SQLITE_SCANSTAT_EXPLAIN]] <dt>SQLITE_SCANSTAT_EXPLAIN</dt>
 ** <dd>^The "const char *" variable pointed to by the V parameter will be set
 ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
-** description for the X-th loop.
+** description for the X-th loop.</dd>
 **
 ** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
 ** <dd>^The "int" variable pointed to by the V parameter will be set to the
 ** id for the X-th query plan element. The id value is unique within the
 ** statement. The select-id is the same value as is output in the first
-** column of an [EXPLAIN QUERY PLAN] query.
+** column of an [EXPLAIN QUERY PLAN] query.</dd>
 **
 ** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt>
 ** <dd>The "int" variable pointed to by the V parameter will be set to the
-** the id of the parent of the current query element, if applicable, or
+** id of the parent of the current query element, if applicable, or
 ** to zero if the query element has no parent. This is the same value as
-** returned in the second column of an [EXPLAIN QUERY PLAN] query.
+** returned in the second column of an [EXPLAIN QUERY PLAN] query.</dd>
 **
 ** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt>
 ** <dd>The sqlite3_int64 output value is set to the number of cycles,
 ** according to the processor time-stamp counter, that elapsed while the
 ** query element was being processed. This value is not available for
 ** all query elements - if it is unavailable the output variable is
-** set to -1.
+** set to -1.</dd>
 **
 */
 #define SQLITE_SCANSTAT_NLOOP    0
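
The SQLITE_SCANSTAT_* outputs documented above are retrieved per query element. A sketch of a dump loop, assuming a build with SQLITE_ENABLE_STMT_SCANSTATUS; error handling is abbreviated.

```c
#include <stdio.h>
#include "sqlite3.h"

void dump_scan_stats(sqlite3_stmt *pStmt){
  int idx;
  for(idx=0; ; idx++){
    sqlite3_int64 nLoop = 0;
    const char *zExplain = 0;
    /* A non-zero return means idx is out of range: stop */
    if( sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NLOOP,
                                   0, (void*)&nLoop) ) break;
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_EXPLAIN,
                               0, (void*)&zExplain);
    printf("loop %d: nLoop=%lld %s\n", idx,
           (long long)nLoop, zExplain ? zExplain : "");
  }
}
```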
@@ -10759,8 +10830,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
 ** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter.
 **
 ** Parameter "idx" identifies the specific query element to retrieve statistics
-** for. Query elements are numbered starting from zero. A value of -1 may be
-** to query for statistics regarding the entire query. ^If idx is out of range
+** for. Query elements are numbered starting from zero. A value of -1 may
+** retrieve statistics for the entire query. ^If idx is out of range
 ** - less than -1 or greater than or equal to the total number of query
 ** elements used to implement the statement - a non-zero value is returned and
 ** the variable that pOut points to is unchanged.
@@ -10803,7 +10874,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
 ** METHOD: sqlite3
 **
 ** ^If a write-transaction is open on [database connection] D when the
-** [sqlite3_db_cacheflush(D)] interface invoked, any dirty
+** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty
 ** pages in the pager-cache that are not currently in use are written out
 ** to disk. A dirty page may be in use if a database cursor created by an
 ** active SQL statement is reading from it, or if it is page 1 of a database
@@ -10917,8 +10988,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
 ** triggers; and so forth.
 **
 ** When the [sqlite3_blob_write()] API is used to update a blob column,
-** the pre-update hook is invoked with SQLITE_DELETE. This is because the
-** in this case the new values are not available. In this case, when a
+** the pre-update hook is invoked with SQLITE_DELETE, because
+** the new values are not yet available. In this case, when a
 ** callback made with op==SQLITE_DELETE is actually a write using the
 ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
 ** the index of the column being written. In other cases, where the
@@ -11171,7 +11242,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
 ** For an ordinary on-disk database file, the serialization is just a
 ** copy of the disk file. For an in-memory database or a "TEMP" database,
 ** the serialization is the same sequence of bytes which would be written
-** to disk if that database where backed up to disk.
+** to disk if that database were backed up to disk.
 **
 ** The usual case is that sqlite3_serialize() copies the serialization of
 ** the database into memory obtained from [sqlite3_malloc64()] and returns
@@ -11180,7 +11251,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
 ** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations
 ** are made, and the sqlite3_serialize() function will return a pointer
 ** to the contiguous memory representation of the database that SQLite
-** is currently using for that database, or NULL if the no such contiguous
+** is currently using for that database, or NULL if no such contiguous
 ** memory representation of the database exists. A contiguous memory
 ** representation of the database will usually only exist if there has
 ** been a prior call to [sqlite3_deserialize(D,S,...)] with the same
@@ -11251,7 +11322,7 @@ SQLITE_API unsigned char *sqlite3_serialize(
 ** database is currently in a read transaction or is involved in a backup
 ** operation.
 **
-** It is not possible to deserialized into the TEMP database. If the
+** It is not possible to deserialize into the TEMP database. If the
 ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
 ** function returns SQLITE_ERROR.
 **
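
The serialize/deserialize wording fixed above concerns this round trip. A minimal sketch; note the target schema must be "main" or an attached database, since "temp" is rejected with SQLITE_ERROR.

```c
#include "sqlite3.h"

int clone_into(sqlite3 *pSrc, sqlite3 *pDest){
  sqlite3_int64 n = 0;
  unsigned char *p = sqlite3_serialize(pSrc, "main", &n, 0);
  if( p==0 ) return SQLITE_NOMEM;
  /* FREEONCLOSE: pDest takes ownership of p; RESIZEABLE: image may grow */
  return sqlite3_deserialize(pDest, "main", p, n, n,
           SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
}
```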
@@ -11273,7 +11344,7 @@ SQLITE_API int sqlite3_deserialize(
   sqlite3 *db,            /* The database connection */
   const char *zSchema,    /* Which DB to reopen with the deserialization */
   unsigned char *pData,   /* The serialized database content */
-  sqlite3_int64 szDb,     /* Number bytes in the deserialization */
+  sqlite3_int64 szDb,     /* Number of bytes in the deserialization */
   sqlite3_int64 szBuf,    /* Total size of buffer pData[] */
   unsigned mFlags         /* Zero or more SQLITE_DESERIALIZE_* flags */
 );
@@ -11281,7 +11352,7 @@ SQLITE_API int sqlite3_deserialize(
 /*
 ** CAPI3REF: Flags for sqlite3_deserialize()
 **
-** The following are allowed values for 6th argument (the F argument) to
+** The following are allowed values for the 6th argument (the F argument) to
 ** the [sqlite3_deserialize(D,S,P,N,M,F)] interface.
 **
 ** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization
@@ -11806,9 +11877,10 @@ SQLITE_API void sqlite3session_table_filter(
 ** is inserted while a session object is enabled, then later deleted while
 ** the same session object is disabled, no INSERT record will appear in the
 ** changeset, even though the delete took place while the session was disabled.
-** Or, if one field of a row is updated while a session is disabled, and
-** another field of the same row is updated while the session is enabled, the
-** resulting changeset will contain an UPDATE change that updates both fields.
+** Or, if one field of a row is updated while a session is enabled, and
+** then another field of the same row is updated while the session is disabled,
+** the resulting changeset will contain an UPDATE change that updates both
+** fields.
 */
 SQLITE_API int sqlite3session_changeset(
   sqlite3_session *pSession,      /* Session object */
@@ -11880,8 +11952,9 @@ SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession
 ** database zFrom the contents of the two compatible tables would be
 ** identical.
 **
-** It an error if database zFrom does not exist or does not contain the
-** required compatible table.
+** Unless the call to this function is a no-op as described above, it is an
+** error if database zFrom does not exist or does not contain the required
+** compatible table.
 **
 ** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite
 ** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg
@@ -12016,7 +12089,7 @@ SQLITE_API int sqlite3changeset_start_v2(
 ** The following flags may passed via the 4th parameter to
 ** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]:
 **
-** <dt>SQLITE_CHANGESETAPPLY_INVERT</dt>
+** <dt>SQLITE_CHANGESETSTART_INVERT</dt>
 ** Invert the changeset while iterating through it. This is equivalent to
 ** inverting a changeset using sqlite3changeset_invert() before applying it.
 ** It is an error to specify this flag with a patchset.
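
The session-module wording fixed above reads most easily next to the basic workflow. A sketch assuming a build with SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK; the table name t1 is invented for illustration.

```c
#include "sqlite3.h"

int capture_changes(sqlite3 *db, int *pnChangeset, void **ppChangeset){
  sqlite3_session *pSession = 0;
  int rc = sqlite3session_create(db, "main", &pSession);
  if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSession, "t1");
  if( rc==SQLITE_OK ){
    /* ... the application makes its writes to t1 here ... */
    rc = sqlite3session_changeset(pSession, pnChangeset, ppChangeset);
  }
  sqlite3session_delete(pSession);
  return rc;  /* caller frees *ppChangeset with sqlite3_free() */
}
```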
@@ -12331,19 +12404,6 @@ SQLITE_API int sqlite3changeset_concat(
   void **ppOut                    /* OUT: Buffer containing output changeset */
 );
 
-
-/*
-** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
-*/
-SQLITE_API int sqlite3changeset_upgrade(
-  sqlite3 *db,
-  const char *zDb,
-  int nIn, const void *pIn,       /* Input changeset */
-  int *pnOut, void **ppOut        /* OUT: Inverse of input */
-);
-
-
-
 /*
 ** CAPI3REF: Changegroup Handle
 **
@@ -14091,14 +14151,22 @@ struct fts5_api {
 **   * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement.
 **   * Terms in the VALUES clause of an INSERT statement
 **
-** The hard upper limit here is 32676. Most database people will
+** The hard upper limit here is 32767. Most database people will
 ** tell you that in a well-normalized database, you usually should
 ** not have more than a dozen or so columns in any table. And if
 ** that is the case, there is no point in having more than a few
 ** dozen values in any of the other situations described above.
+**
+** An index can only have SQLITE_MAX_COLUMN columns from the user
+** point of view, but the underlying b-tree that implements the index
+** might have up to twice as many columns in a WITHOUT ROWID table,
+** since must also store the primary key at the end. Hence the
+** column count for Index is u16 instead of i16.
 */
-#ifndef SQLITE_MAX_COLUMN
+#if !defined(SQLITE_MAX_COLUMN)
 # define SQLITE_MAX_COLUMN 2000
+#elif SQLITE_MAX_COLUMN>32767
+# error SQLITE_MAX_COLUMN may not exceed 32767
 #endif
 
 /*
@@ -14750,6 +14818,7 @@ struct HashElem {
   HashElem *next, *prev;   /* Next and previous elements in the table */
   void *data;              /* Data associated with this element */
   const char *pKey;        /* Key associated with this element */
+  unsigned int h;          /* hash for pKey */
 };
 
 /*
@@ -15110,7 +15179,17 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
 ** ourselves.
 */
 #ifndef offsetof
-#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD))
+#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD))
+#endif
+
+/*
+** Work around C99 "flex-array" syntax for pre-C99 compilers, so as
+** to avoid complaints from -fsanitize=strict-bounds.
+*/
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+# define FLEXARRAY
+#else
+# define FLEXARRAY 1
 #endif
 
 /*
@@ -15188,6 +15267,11 @@ typedef INT16_TYPE i16;            /* 2-byte signed integer */
 typedef UINT8_TYPE u8;             /* 1-byte unsigned integer */
 typedef INT8_TYPE i8;              /* 1-byte signed integer */
 
+/* A bitfield type for use inside of structures. Always follow with :N where
+** N is the number of bits.
+*/
+typedef unsigned bft;  /* Bit Field Type */
+
 /*
 ** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value
 ** that can be stored in a u32 without loss of data. The value
@@ -15356,6 +15440,14 @@ typedef INT16_TYPE LogEst;
 #define LARGEST_UINT64 (0xffffffff|(((u64)0xffffffff)<<32))
 #define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
 
+/*
+** Macro SMXV(n) return the maximum value that can be held in variable n,
+** assuming n is a signed integer type. UMXV(n) is similar for unsigned
+** integer types.
+*/
+#define SMXV(n) ((((i64)1)<<(sizeof(n)*8-1))-1)
+#define UMXV(n) ((((i64)1)<<(sizeof(n)*8))-1)
+
 /*
 ** Round up a number to the next larger multiple of 8. This is used
 ** to force 8-byte alignment on 64-bit architectures.
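
The FLEXARRAY and SZ_*() machinery introduced above replaces the old one-element-array idiom. A self-contained illustration of the pattern, with invented names (IntList, SZ_INTLIST) that are not from the patch:

```c
#include <stddef.h>
#include <stdlib.h>

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# define FLEXARRAY
#else
# define FLEXARRAY 1
#endif

struct IntList {
  int n;               /* Number of used entries in a[] */
  int a[FLEXARRAY];    /* The entries themselves; MUST BE LAST */
};

/* Size in bytes of an IntList that can hold up to N entries */
#define SZ_INTLIST(N) (offsetof(struct IntList, a) + (N)*sizeof(int))

struct IntList *intListNew(int N){
  struct IntList *p = malloc( SZ_INTLIST(N) );
  if( p ) p->n = 0;
  return p;
}
```

Sizing with offsetof() rather than sizeof() avoids counting the padding that a trailing one-element array used to imply, which is presumably why the patch adds one SZ_*() macro per converted struct.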
@@ -17332,8 +17424,8 @@ SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context*);
 SQLITE_PRIVATE int sqlite3VdbeBytecodeVtabInit(sqlite3*);
 #endif
 
-/* Use SQLITE_ENABLE_COMMENTS to enable generation of extra comments on
-** each VDBE opcode.
+/* Use SQLITE_ENABLE_EXPLAIN_COMMENTS to enable generation of extra
+** comments on each VDBE opcode.
 **
 ** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op
 ** comments in VDBE programs that show key decision points in the code
@@ -18056,6 +18148,10 @@ struct sqlite3 {
   Savepoint *pSavepoint;        /* List of active savepoints */
   int nAnalysisLimit;           /* Number of index rows to ANALYZE */
   int busyTimeout;              /* Busy handler timeout, in msec */
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  int setlkTimeout;             /* Blocking lock timeout, in msec. -1 -> inf. */
+  int setlkFlags;               /* Flags passed to setlk_timeout() */
+#endif
   int nSavepoint;               /* Number of non-transaction savepoints */
   int nStatement;               /* Number of nested statement-transactions */
   i64 nDeferredCons;            /* Net deferred constraints this transaction. */
@@ -18610,6 +18706,7 @@ struct CollSeq {
 #define SQLITE_AFF_INTEGER  0x44  /* 'D' */
 #define SQLITE_AFF_REAL     0x45  /* 'E' */
 #define SQLITE_AFF_FLEXNUM  0x46  /* 'F' */
+#define SQLITE_AFF_DEFER    0x58  /* 'X' - defer computation until later */
 
 #define sqlite3IsNumericAffinity(X)  ((X)>=SQLITE_AFF_NUMERIC)
 
@@ -18734,6 +18831,7 @@ struct Table {
   } u;
   Trigger *pTrigger;   /* List of triggers on this object */
   Schema *pSchema;     /* Schema that contains this table */
+  u8 aHx[16];          /* Column aHt[K%sizeof(aHt)] might have hash K */
 };
 
 /*
@@ -18867,9 +18965,13 @@ struct FKey {
   struct sColMap {      /* Mapping of columns in pFrom to columns in zTo */
     int iFrom;            /* Index of column in pFrom */
     char *zCol;           /* Name of column in zTo. If NULL use PRIMARY KEY */
-  } aCol[1];            /* One entry for each of nCol columns */
+  } aCol[FLEXARRAY];    /* One entry for each of nCol columns */
 };
 
+/* The size (in bytes) of an FKey object holding N columns. The answer
+** does NOT include space to hold the zTo name. */
+#define SZ_FKEY(N) (offsetof(FKey,aCol)+(N)*sizeof(struct sColMap))
+
 /*
 ** SQLite supports many different ways to resolve a constraint
 ** error. ROLLBACK processing means that a constraint violation
@@ -18931,9 +19033,12 @@ struct KeyInfo {
   u16 nAllField;      /* Total columns, including key plus others */
   sqlite3 *db;        /* The database connection */
   u8 *aSortFlags;     /* Sort order for each column. */
-  CollSeq *aColl[1];  /* Collating sequence for each term of the key */
+  CollSeq *aColl[FLEXARRAY];  /* Collating sequence for each term of the key */
 };
 
+/* The size (in bytes) of a KeyInfo object with up to N fields */
+#define SZ_KEYINFO(N) (offsetof(KeyInfo,aColl) + (N)*sizeof(CollSeq*))
+
 /*
 ** Allowed bit values for entries in the KeyInfo.aSortFlags[] array.
 */
@@ -19053,7 +19158,7 @@ struct Index {
   Pgno tnum;               /* DB Page containing root of this index */
   LogEst szIdxRow;         /* Estimated average row size in bytes */
   u16 nKeyCol;             /* Number of columns forming the key */
-  u16 nColumn;             /* Number of columns stored in the index */
+  u16 nColumn;             /* Nr columns in btree. Can be 2*Table.nCol */
   u8 onError;              /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
   unsigned idxType:2;      /* 0:Normal 1:UNIQUE, 2:PRIMARY KEY, 3:IPK */
   unsigned bUnordered:1;   /* Use this index for == or IN queries only */
@@ -19062,7 +19167,6 @@ struct Index {
   unsigned isCovering:1;   /* True if this is a covering index */
   unsigned noSkipScan:1;   /* Do not try to use skip-scan if true */
   unsigned hasStat1:1;     /* aiRowLogEst values come from sqlite_stat1 */
-  unsigned bLowQual:1;     /* sqlite_stat1 says this is a low-quality index */
   unsigned bNoQuery:1;     /* Do not use this index to optimize queries */
   unsigned bAscKeyBug:1;   /* True if the bba7b69f9849b5bf bug applies */
   unsigned bHasVCol:1;     /* Index references one or more VIRTUAL columns */
@@ -19152,7 +19256,7 @@ struct AggInfo {
                           ** from source tables rather than from accumulators */
   u8 useSortingIdx;       /* In direct mode, reference the sorting index rather
                           ** than the source table */
-  u16 nSortingColumn;     /* Number of columns in the sorting index */
+  u32 nSortingColumn;     /* Number of columns in the sorting index */
   int sortingIdx;         /* Cursor number of the sorting index */
   int sortingIdxPTab;     /* Cursor number of pseudo-table */
   int iFirstReg;          /* First register in range for aCol[] and aFunc[] */
@@ -19161,8 +19265,8 @@ struct AggInfo {
     Table *pTab;            /* Source table */
     Expr *pCExpr;           /* The original expression */
     int iTable;             /* Cursor number of the source table */
-    i16 iColumn;            /* Column number within the source table */
-    i16 iSorterColumn;      /* Column number in the sorting index */
+    int iColumn;            /* Column number within the source table */
+    int iSorterColumn;      /* Column number in the sorting index */
   } *aCol;
   int nColumn;            /* Number of used entries in aCol[] */
   int nAccumulator;       /* Number of columns that show through to the output.
@@ -19337,6 +19441,7 @@ struct Expr {
     Table *pTab;           /* TK_COLUMN: Table containing column. Can be NULL
                            ** for a column of an index on an expression */
     Window *pWin;          /* EP_WinFunc: Window/Filter defn for a function */
+    int nReg;              /* TK_NULLS: Number of registers to NULL out */
     struct {               /* TK_IN, TK_SELECT, and TK_EXISTS */
       int iAddr;             /* Subroutine entry address */
       int regReturn;         /* Register used to hold return address */
@@ -19391,10 +19496,10 @@ struct Expr {
 /* Macros can be used to test, set, or clear bits in the
 ** Expr.flags field.
 */
-#define ExprHasProperty(E,P)     (((E)->flags&(P))!=0)
-#define ExprHasAllProperty(E,P)  (((E)->flags&(P))==(P))
-#define ExprSetProperty(E,P)     (E)->flags|=(P)
-#define ExprClearProperty(E,P)   (E)->flags&=~(P)
+#define ExprHasProperty(E,P)     (((E)->flags&(u32)(P))!=0)
+#define ExprHasAllProperty(E,P)  (((E)->flags&(u32)(P))==(u32)(P))
+#define ExprSetProperty(E,P)     (E)->flags|=(u32)(P)
+#define ExprClearProperty(E,P)   (E)->flags&=~(u32)(P)
 #define ExprAlwaysTrue(E)   (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue)
 #define ExprAlwaysFalse(E)  (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse)
 #define ExprIsFullSize(E)   (((E)->flags&(EP_Reduced|EP_TokenOnly))==0)
@@ -19506,9 +19611,14 @@ struct ExprList {
       int iConstExprReg;      /* Register in which Expr value is cached. Used only
                               ** by Parse.pConstExpr */
     } u;
-  } a[1];                  /* One slot for each expression in the list */
+  } a[FLEXARRAY];          /* One slot for each expression in the list */
 };
 
+/* The size (in bytes) of an ExprList object that is big enough to hold
+** as many as N expressions. */
+#define SZ_EXPRLIST(N) \
+  (offsetof(ExprList,a) + (N)*sizeof(struct ExprList_item))
+
 /*
 ** Allowed values for Expr.a.eEName
 */
@@ -19536,9 +19646,12 @@ struct IdList {
   int nId;         /* Number of identifiers on the list */
   struct IdList_item {
     char *zName;      /* Name of the identifier */
-  } a[1];
+  } a[FLEXARRAY];
 };
 
+/* The size (in bytes) of an IdList object that can hold up to N IDs. */
+#define SZ_IDLIST(N) (offsetof(IdList,a)+(N)*sizeof(struct IdList_item))
+
 /*
 ** Allowed values for IdList.eType, which determines which value of the a.u4
 ** is valid.
@@ -19658,11 +19771,19 @@ struct OnOrUsing {
 **
 */
 struct SrcList {
-  int nSrc;        /* Number of tables or subqueries in the FROM clause */
-  u32 nAlloc;      /* Number of entries allocated in a[] below */
-  SrcItem a[1];    /* One entry for each identifier on the list */
+  int nSrc;               /* Number of tables or subqueries in the FROM clause */
+  u32 nAlloc;             /* Number of entries allocated in a[] below */
+  SrcItem a[FLEXARRAY];   /* One entry for each identifier on the list */
 };
 
+/* Size (in bytes) of a SrcList object that can hold as many as N
+** SrcItem objects. */
+#define SZ_SRCLIST(N) (offsetof(SrcList,a)+(N)*sizeof(SrcItem))
+
+/* Size (in bytes( of a SrcList object that holds 1 SrcItem. This is a
+** special case of SZ_SRCITEM(1) that comes up often. */
+#define SZ_SRCLIST_1 (offsetof(SrcList,a)+sizeof(SrcItem))
+
 /*
 ** Permitted values of the SrcList.a.jointype field
 */
@@ -20131,25 +20252,32 @@ struct Parse {
   char *zErrMsg;       /* An error message */
   Vdbe *pVdbe;         /* An engine for executing database bytecode */
   int rc;              /* Return code from execution */
-  u8 colNamesSet;      /* TRUE after OP_ColumnName has been issued to pVdbe */
-  u8 checkSchema;      /* Causes schema cookie check after an error */
+  LogEst nQueryLoop;   /* Est number of iterations of a query (10*log2(N)) */
   u8 nested;           /* Number of nested calls to the parser/code generator */
   u8 nTempReg;         /* Number of temporary registers in aTempReg[] */
   u8 isMultiWrite;     /* True if statement may modify/insert multiple rows */
   u8 mayAbort;         /* True if statement may throw an ABORT exception */
   u8 hasCompound;      /* Need to invoke convertCompoundSelectToSubquery() */
-  u8 okConstFactor;    /* OK to factor out constants */
   u8 disableLookaside; /* Number of times lookaside has been disabled */
   u8 prepFlags;        /* SQLITE_PREPARE_* flags */
   u8 withinRJSubrtn;   /* Nesting level for RIGHT JOIN body subroutines */
-  u8 bHasWith;         /* True if statement contains WITH */
   u8 mSubrtnSig;       /* mini Bloom filter on available SubrtnSig.selId */
+  u8 eTriggerOp;       /* TK_UPDATE, TK_INSERT or TK_DELETE */
+  u8 bReturning;       /* Coding a RETURNING trigger */
+  u8 eOrconf;          /* Default ON CONFLICT policy for trigger steps */
+  u8 disableTriggers;  /* True to disable triggers */
 #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
   u8 earlyCleanup;     /* OOM inside sqlite3ParserAddCleanup() */
 #endif
 #ifdef SQLITE_DEBUG
   u8 ifNotExists;      /* Might be true if IF NOT EXISTS. Assert()s only */
+  u8 isCreate;         /* CREATE TABLE, INDEX, or VIEW (but not TRIGGER)
+                       ** and ALTER TABLE ADD COLUMN. */
 #endif
+  bft colNamesSet :1;  /* TRUE after OP_ColumnName has been issued to pVdbe */
+  bft bHasWith :1;     /* True if statement contains WITH */
+  bft okConstFactor :1; /* OK to factor out constants */
+  bft checkSchema :1;  /* Causes schema cookie check after an error */
   int nRangeReg;       /* Size of the temporary register block */
   int iRangeReg;       /* First register in temporary register block */
   int nErr;            /* Number of errors seen */
@@ -20164,12 +20292,9 @@ struct Parse {
   ExprList *pConstExpr;/* Constant expressions */
   IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */
   IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */
-  Token constraintName;/* Name of the constraint currently being parsed */
   yDbMask writeMask;   /* Start a write transaction on these databases */
   yDbMask cookieMask;  /* Bitmask of schema verified databases */
-  int regRowid;        /* Register holding rowid of CREATE TABLE entry */
-  int regRoot;         /* Register holding root page number for new objects */
-  int nMaxArg;         /* Max args passed to user function by sub-program */
+  int nMaxArg;         /* Max args to xUpdate and xFilter vtab methods */
   int nSelect;         /* Number of SELECT stmts. Counter for Select.selId */
 #ifndef SQLITE_OMIT_PROGRESS_CALLBACK
   u32 nProgressSteps;  /* xProgress steps taken during sqlite3_prepare() */
@@ -20183,17 +20308,6 @@ struct Parse {
   Table *pTriggerTab;  /* Table triggers are being coded for */
   TriggerPrg *pTriggerPrg;  /* Linked list of coded triggers */
   ParseCleanup *pCleanup;   /* List of cleanup operations to run after parse */
-  union {
-    int addrCrTab;         /* Address of OP_CreateBtree on CREATE TABLE */
-    Returning *pReturning; /* The RETURNING clause */
-  } u1;
-  u32 oldmask;         /* Mask of old.* columns referenced */
-  u32 newmask;         /* Mask of new.* columns referenced */
-  LogEst nQueryLoop;   /* Est number of iterations of a query (10*log2(N)) */
-  u8 eTriggerOp;       /* TK_UPDATE, TK_INSERT or TK_DELETE */
-  u8 bReturning;       /* Coding a RETURNING trigger */
-  u8 eOrconf;          /* Default ON CONFLICT policy for trigger steps */
-  u8 disableTriggers;  /* True to disable triggers */
 
   /**************************************************************************
   ** Fields above must be initialized to zero. The fields that follow,
@@ -20205,6 +20319,19 @@ struct Parse {
   int aTempReg[8];        /* Holding area for temporary registers */
   Parse *pOuterParse;     /* Outer Parse object when nested */
   Token sNameToken;       /* Token with unqualified schema object name */
+  u32 oldmask;            /* Mask of old.* columns referenced */
+  u32 newmask;            /* Mask of new.* columns referenced */
+  union {
+    struct {            /* These fields available when isCreate is true */
+      int addrCrTab;      /* Address of OP_CreateBtree on CREATE TABLE */
+      int regRowid;       /* Register holding rowid of CREATE TABLE entry */
+      int regRoot;        /* Register holding root page for new objects */
+      Token constraintName; /* Name of the constraint currently being parsed */
+    } cr;
+    struct {            /* These fields available to all other statements */
+      Returning *pReturning; /* The RETURNING clause */
+    } d;
+  } u1;
 
   /************************************************************************
   ** Above is constant between recursions. Below is reset before and after
@@ -20720,9 +20847,13 @@ struct With {
   int nCte;               /* Number of CTEs in the WITH clause */
   int bView;              /* Belongs to the outermost Select of a view */
   With *pOuter;           /* Containing WITH clause, or NULL */
-  Cte a[1];               /* For each CTE in the WITH clause.... */
+  Cte a[FLEXARRAY];       /* For each CTE in the WITH clause.... */
 };
 
+/* The size (in bytes) of a With object that can hold as many
+** as N different CTEs. */
+#define SZ_WITH(N) (offsetof(With,a) + (N)*sizeof(Cte))
+
 /*
 ** The Cte object is not guaranteed to persist for the entire duration
 ** of code generation. (The query flattener or other parser tree
@@ -20751,9 +20882,13 @@ struct DbClientData {
   DbClientData *pNext;        /* Next in a linked list */
   void *pData;                /* The data */
   void (*xDestructor)(void*); /* Destructor. Might be NULL */
-  char zName[1];              /* Name of this client data. MUST BE LAST */
+  char zName[FLEXARRAY];      /* Name of this client data. MUST BE LAST */
 };
 
+/* The size (in bytes) of a DbClientData object that can has a name
+** that is N bytes long, including the zero-terminator. */
+#define SZ_DBCLIENTDATA(N) (offsetof(DbClientData,zName)+(N))
+
 #ifdef SQLITE_DEBUG
 /*
 ** An instance of the TreeView object is used for printing the content of
@@ -21196,7 +21331,7 @@ SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(Parse*,Table*,Select*,char);
 SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char);
 SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int);
 SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);
-SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index*, i16);
+SQLITE_PRIVATE int sqlite3TableColumnToIndex(Index*, int);
 #ifdef SQLITE_OMIT_GENERATED_COLUMNS
 # define sqlite3TableColumnToStorage(T,X) (X)  /* No-op pass-through */
 # define sqlite3StorageColumnToTable(T,X) (X)  /* No-op pass-through */
@@ -21294,7 +21429,7 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*);
 SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*);
 SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3*, OnOrUsing*);
 SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*);
-SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**);
+SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,int,int,char**);
 SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*,
                           Expr*, int, int, u8);
 SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int);
@@ -21341,6 +21476,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(Parse*, Table*, Column*, int)
 SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int);
 SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
 SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(Parse*, Expr*, int);
+SQLITE_PRIVATE void sqlite3ExprNullRegisterRange(Parse*, int, int);
 SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*);
 SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int);
 SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8);
@@ -21430,7 +21566,8 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,const Select*,int);
 SQLITE_PRIVATE FuncDef *sqlite3FunctionSearch(int,const char*);
 SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int);
 SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8);
-SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum*,sqlite3_value*);
+SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum*,sqlite3_value*,int);
+SQLITE_PRIVATE int sqlite3AppendOneUtf8Character(char*, u32);
 SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void);
 SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void);
 SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void);
@@ -22295,6 +22432,9 @@ static const char * const sqlite3azCompileOpt[] = {
 #ifdef SQLITE_BUG_COMPATIBLE_20160819
   "BUG_COMPATIBLE_20160819",
 #endif
+#ifdef SQLITE_BUG_COMPATIBLE_20250510
+  "BUG_COMPATIBLE_20250510",
+#endif
 #ifdef SQLITE_CASE_SENSITIVE_LIKE
   "CASE_SENSITIVE_LIKE",
 #endif
@@ -22531,6 +22671,9 @@ static const char * const sqlite3azCompileOpt[] = {
 #ifdef SQLITE_ENABLE_SESSION
   "ENABLE_SESSION",
 #endif
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  "ENABLE_SETLK_TIMEOUT",
+#endif
 #ifdef SQLITE_ENABLE_SNAPSHOT
   "ENABLE_SNAPSHOT",
 #endif
@@ -22585,6 +22728,9 @@ static const char * const sqlite3azCompileOpt[] = {
 #ifdef SQLITE_EXTRA_INIT
   "EXTRA_INIT=" CTIMEOPT_VAL(SQLITE_EXTRA_INIT),
 #endif
+#ifdef SQLITE_EXTRA_INIT_MUTEXED
+  "EXTRA_INIT_MUTEXED=" CTIMEOPT_VAL(SQLITE_EXTRA_INIT_MUTEXED),
+#endif
 #ifdef SQLITE_EXTRA_SHUTDOWN
   "EXTRA_SHUTDOWN=" CTIMEOPT_VAL(SQLITE_EXTRA_SHUTDOWN),
 #endif
@@ -23569,12 +23715,19 @@ struct VdbeCursor {
 #endif
   VdbeTxtBlbCache *pCache;        /* Cache of large TEXT or BLOB values */
 
-  /* 2*nField extra array elements allocated for aType[], beyond the one
-  ** static element declared in the structure. nField total array slots for
-  ** aType[] and nField+1 array slots for aOffset[] */
-  u32 aType[1];                   /* Type values record decode. MUST BE LAST */
+  /* Space is allocated for aType to hold at least 2*nField+1 entries:
+  ** nField slots for aType[] and nField+1 array slots for aOffset[] */
+  u32 aType[FLEXARRAY];           /* Type values record decode. MUST BE LAST */
 };
 
+/*
+** The size (in bytes) of a VdbeCursor object that has an nField value of N
+** or less. The value of SZ_VDBECURSOR(n) is guaranteed to be a multiple
+** of 8.
+*/
+#define SZ_VDBECURSOR(N) \
+  (ROUND8(offsetof(VdbeCursor,aType)) + ((N)+1)*sizeof(u64))
+
 /* Return true if P is a null-only cursor
 */
 #define IsNullCursor(P) \
@@ -23831,13 +23984,16 @@ struct sqlite3_context {
   u8 enc;                 /* Encoding to use for results */
   u8 skipFlag;            /* Skip accumulator loading if true */
   u16 argc;               /* Number of arguments */
-  sqlite3_value *argv[1]; /* Argument set */
+  sqlite3_value *argv[FLEXARRAY]; /* Argument set */
 };
 
-/* A bitfield type for use inside of structures. Always follow with :N where
-** N is the number of bits.
+/*
+** The size (in bytes) of an sqlite3_context object that holds N
+** argv[] arguments.
 */
-typedef unsigned bft;  /* Bit Field Type */
+#define SZ_CONTEXT(N) \
+  (offsetof(sqlite3_context,argv)+(N)*sizeof(sqlite3_value*))
 
 /* The ScanStatus object holds a single value for the
 ** sqlite3_stmt_scanstatus() interface.
@@ -23898,7 +24054,7 @@ struct Vdbe {
   i64 nStmtDefCons;       /* Number of def. constraints when stmt started */
   i64 nStmtDefImmCons;    /* Number of def. imm constraints when stmt started */
   Mem *aMem;              /* The memory locations */
-  Mem **apArg;            /* Arguments to currently executing user function */
+  Mem **apArg;            /* Arguments xUpdate and xFilter vtab methods */
   VdbeCursor **apCsr;     /* One element of this array for each open cursor */
   Mem *aVar;              /* Values for the OP_Variable opcode. */
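
For context on the sqlite3_context change above: a scalar SQL function receives its arguments through exactly the argv[] array this structure carries. An illustrative registration of a one-argument function, not part of the patch:

```c
#include "sqlite3.h"

static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;  /* registered with exactly one argument below */
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

int register_half(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1,
             SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, halfFunc, 0, 0);
}
```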
@@ -23918,6 +24074,7 @@ struct Vdbe {
 #ifdef SQLITE_DEBUG
   int rcApp;              /* errcode set by sqlite3_result_error_code() */
   u32 nWrite;             /* Number of write operations that have occurred */
+  int napArg;             /* Size of the apArg[] array */
 #endif
   u16 nResColumn;         /* Number of columns in one row of the result set */
   u16 nResAlloc;          /* Column slots allocated to aColName[] */
@@ -23970,7 +24127,7 @@ struct PreUpdate {
   VdbeCursor *pCsr;               /* Cursor to read old values from */
   int op;                         /* One of SQLITE_INSERT, UPDATE, DELETE */
   u8 *aRecord;                    /* old.* database record */
-  KeyInfo keyinfo;
+  KeyInfo *pKeyinfo;              /* Key information */
   UnpackedRecord *pUnpacked;      /* Unpacked version of aRecord[] */
   UnpackedRecord *pNewUnpacked;   /* Unpacked version of new.* record */
   int iNewReg;                    /* Register for new.* values */
@@ -23982,6 +24139,7 @@ struct PreUpdate {
   Table *pTab;                    /* Schema object being updated */
   Index *pPk;                     /* PK index if pTab is WITHOUT ROWID */
   sqlite3_value **apDflt;         /* Array of default values, if required */
+  u8 keyinfoSpace[SZ_KEYINFO(0)]; /* Space to hold pKeyinfo[0] content */
 };
 
 /*
@@ -24348,8 +24506,9 @@ SQLITE_PRIVATE int sqlite3LookasideUsed(sqlite3 *db, int *pHighwater){
   nInit += countLookasideSlots(db->lookaside.pSmallInit);
   nFree += countLookasideSlots(db->lookaside.pSmallFree);
 #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
-  if( pHighwater ) *pHighwater = db->lookaside.nSlot - nInit;
-  return db->lookaside.nSlot - (nInit+nFree);
+  assert( db->lookaside.nSlot >= nInit+nFree );
+  if( pHighwater ) *pHighwater = (int)(db->lookaside.nSlot - nInit);
+  return (int)(db->lookaside.nSlot - (nInit+nFree));
 }
 
 /*
@@ -24402,7 +24561,7 @@ SQLITE_API int sqlite3_db_status(
       assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 );
       assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 );
       *pCurrent = 0;
-      *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT];
+      *pHighwater = (int)db->lookaside.anStat[op-SQLITE_DBSTATUS_LOOKASIDE_HIT];
       if( resetFlag ){
         db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0;
       }
@@ -25914,7 +26073,7 @@ static int daysAfterMonday(DateTime *pDate){
 ** In other words, return the day of the week according
 ** to this code:
 **
-**   0=Sunday, 1=Monday, 2=Tues, ..., 6=Saturday
+**   0=Sunday, 1=Monday, 2=Tuesday, ..., 6=Saturday
 */
 static int daysAfterSunday(DateTime *pDate){
   assert( pDate->validJD );
@@ -30123,6 +30282,8 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
 
 #ifdef __CYGWIN__
 # include <sys/cygwin.h>
+# include /* amalgamator: dontcache */
+# include /* amalgamator: dontcache */
 # include /* amalgamator: dontcache */
 #endif
@@ -31517,17 +31678,17 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
 #define etPERCENT     7 /* Percent symbol. %% */
 #define etCHARX       8 /* Characters. %c */
 /* The rest are extensions, not normally found in printf() */
-#define etSQLESCAPE   9 /* Strings with '\'' doubled. %q */
-#define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '',
-                           NULL pointers replaced by SQL NULL. %Q */
-#define etTOKEN      11 /* a pointer to a Token structure */
-#define etSRCITEM    12 /* a pointer to a SrcItem */
-#define etPOINTER    13 /* The %p conversion */
-#define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */
-#define etORDINAL    15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */
-#define etDECIMAL    16 /* %d or %u, but not %x, %o */
+#define etESCAPE_q    9 /* Strings with '\'' doubled. %q */
+#define etESCAPE_Q   10 /* Strings with '\'' doubled and enclosed in '',
+                           NULL pointers replaced by SQL NULL. %Q */
+#define etTOKEN      11 /* a pointer to a Token structure */
+#define etSRCITEM    12 /* a pointer to a SrcItem */
+#define etPOINTER    13 /* The %p conversion */
+#define etESCAPE_w   14 /* %w -> Strings with '\"' doubled */
+#define etORDINAL    15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */
+#define etDECIMAL    16 /* %d or %u, but not %x, %o */
 
-#define etINVALID    17 /* Any unrecognized conversion type */
+#define etINVALID    17 /* Any unrecognized conversion type */
 
 /*
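
The etESCAPE_q/Q/w classes renamed above implement the %q, %Q and %w conversions. Their user-visible behavior, for orientation (not part of the patch):

```c
#include <stdio.h>
#include "sqlite3.h"

void demo_escapes(const char *zName){
  /* %q doubles embedded single quotes; caller supplies the quotes */
  char *a = sqlite3_mprintf("INSERT INTO t VALUES('%q')", zName);
  /* %Q adds the outer quotes and renders NULL for a NULL pointer */
  char *b = sqlite3_mprintf("INSERT INTO t VALUES(%Q)", zName);
  printf("%s\n%s\n", a, b);
  sqlite3_free(a);
  sqlite3_free(b);
}
```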
%Q */ +#define etTOKEN 11 /* a pointer to a Token structure */ +#define etSRCITEM 12 /* a pointer to a SrcItem */ +#define etPOINTER 13 /* The %p conversion */ +#define etESCAPE_w 14 /* %w -> Strings with '\"' doubled */ +#define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ +#define etDECIMAL 16 /* %d or %u, but not %x, %o */ -#define etINVALID 17 /* Any unrecognized conversion type */ +#define etINVALID 17 /* Any unrecognized conversion type */ /* @@ -31566,9 +31727,9 @@ static const et_info fmtinfo[] = { { 's', 0, 4, etSTRING, 0, 0 }, { 'g', 0, 1, etGENERIC, 30, 0 }, { 'z', 0, 4, etDYNSTRING, 0, 0 }, - { 'q', 0, 4, etSQLESCAPE, 0, 0 }, - { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, - { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, + { 'q', 0, 4, etESCAPE_q, 0, 0 }, + { 'Q', 0, 4, etESCAPE_Q, 0, 0 }, + { 'w', 0, 4, etESCAPE_w, 0, 0 }, { 'c', 0, 0, etCHARX, 0, 0 }, { 'o', 8, 0, etRADIX, 0, 2 }, { 'u', 10, 0, etDECIMAL, 0, 0 }, @@ -32165,25 +32326,7 @@ SQLITE_API void sqlite3_str_vappendf( } }else{ unsigned int ch = va_arg(ap,unsigned int); - if( ch<0x00080 ){ - buf[0] = ch & 0xff; - length = 1; - }else if( ch<0x00800 ){ - buf[0] = 0xc0 + (u8)((ch>>6)&0x1f); - buf[1] = 0x80 + (u8)(ch & 0x3f); - length = 2; - }else if( ch<0x10000 ){ - buf[0] = 0xe0 + (u8)((ch>>12)&0x0f); - buf[1] = 0x80 + (u8)((ch>>6) & 0x3f); - buf[2] = 0x80 + (u8)(ch & 0x3f); - length = 3; - }else{ - buf[0] = 0xf0 + (u8)((ch>>18) & 0x07); - buf[1] = 0x80 + (u8)((ch>>12) & 0x3f); - buf[2] = 0x80 + (u8)((ch>>6) & 0x3f); - buf[3] = 0x80 + (u8)(ch & 0x3f); - length = 4; - } + length = sqlite3AppendOneUtf8Character(buf, ch); } if( precision>1 ){ i64 nPrior = 1; @@ -32263,22 +32406,31 @@ SQLITE_API void sqlite3_str_vappendf( while( ii>=0 ) if( (bufpt[ii--] & 0xc0)==0x80 ) width++; } break; - case etSQLESCAPE: /* %q: Escape ' characters */ - case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */ - case etSQLESCAPE3: { /* %w: Escape " characters */ + case etESCAPE_q: /* %q: Escape ' characters */ + case etESCAPE_Q: /* %Q: Escape ' and enclose in '...' */ + case etESCAPE_w: { /* %w: Escape " characters */ i64 i, j, k, n; - int needQuote, isnull; + int needQuote = 0; char ch; - char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */ char *escarg; + char q; if( bArgList ){ escarg = getTextArg(pArgList); }else{ escarg = va_arg(ap,char*); } - isnull = escarg==0; - if( isnull ) escarg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)"); + if( escarg==0 ){ + escarg = (xtype==etESCAPE_Q ? "NULL" : "(NULL)"); + }else if( xtype==etESCAPE_Q ){ + needQuote = 1; + } + if( xtype==etESCAPE_w ){ + q = '"'; + flag_alternateform = 0; + }else{ + q = '\''; + } /* For %q, %Q, and %w, the precision is the number of bytes (or ** characters if the ! flags is present) to use from the input. ** Because of the extra quoting characters inserted, the number @@ -32291,7 +32443,30 @@ SQLITE_API void sqlite3_str_vappendf( while( (escarg[i+1]&0xc0)==0x80 ){ i++; } } } - needQuote = !isnull && xtype==etSQLESCAPE2; + if( flag_alternateform ){ + /* For %#q, do unistr()-style backslash escapes for + ** all control characters, and for backslash itself. + ** For %#Q, do the same but only if there is at least + ** one control character. 
*/ + u32 nBack = 0; + u32 nCtrl = 0; + for(k=0; ketBUFSIZE ){ bufpt = zExtra = printfTempBuf(pAccum, n); @@ -32300,13 +32475,41 @@ SQLITE_API void sqlite3_str_vappendf( bufpt = buf; } j = 0; - if( needQuote ) bufpt[j++] = q; + if( needQuote ){ + if( needQuote==2 ){ + memcpy(&bufpt[j], "unistr('", 8); + j += 8; + }else{ + bufpt[j++] = '\''; + } + } k = i; - for(i=0; i=0x10 ? '1' : '0'; + bufpt[j++] = "0123456789abcdef"[ch&0xf]; + } + } + }else{ + for(i=0; imxAlloc>0 && !isMalloced(p) ); - zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); + zText = sqlite3DbMallocRaw(p->db, 1+(u64)p->nChar ); if( zText ){ memcpy(zText, p->zText, p->nChar+1); p->printfFlags |= SQLITE_PRINTF_MALLOCED; @@ -32794,6 +32997,15 @@ SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ return zBuf; } +/* Maximum size of an sqlite3_log() message. */ +#if defined(SQLITE_MAX_LOG_MESSAGE) + /* Leave the definition as supplied */ +#elif SQLITE_PRINT_BUF_SIZE*10>10000 +# define SQLITE_MAX_LOG_MESSAGE 10000 +#else +# define SQLITE_MAX_LOG_MESSAGE (SQLITE_PRINT_BUF_SIZE*10) +#endif + /* ** This is the routine that actually formats the sqlite3_log() message. ** We house it in a separate routine from sqlite3_log() to avoid using @@ -32810,7 +33022,7 @@ SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ */ static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){ StrAccum acc; /* String accumulator */ - char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */ + char zMsg[SQLITE_MAX_LOG_MESSAGE]; /* Complete log message */ sqlite3StrAccumInit(&acc, 0, zMsg, sizeof(zMsg), 0); sqlite3_str_vappendf(&acc, zFormat, ap); @@ -34805,6 +35017,35 @@ static const unsigned char sqlite3Utf8Trans1[] = { } \ } +/* +** Write a single UTF8 character whose value is v into the +** buffer starting at zOut. zOut must be sized to hold at +** least four bytes. Return the number of bytes needed +** to encode the new character. +*/ +SQLITE_PRIVATE int sqlite3AppendOneUtf8Character(char *zOut, u32 v){ + if( v<0x00080 ){ + zOut[0] = (u8)(v & 0xff); + return 1; + } + if( v<0x00800 ){ + zOut[0] = 0xc0 + (u8)((v>>6) & 0x1f); + zOut[1] = 0x80 + (u8)(v & 0x3f); + return 2; + } + if( v<0x10000 ){ + zOut[0] = 0xe0 + (u8)((v>>12) & 0x0f); + zOut[1] = 0x80 + (u8)((v>>6) & 0x3f); + zOut[2] = 0x80 + (u8)(v & 0x3f); + return 3; + } + zOut[0] = 0xf0 + (u8)((v>>18) & 0x07); + zOut[1] = 0x80 + (u8)((v>>12) & 0x3f); + zOut[2] = 0x80 + (u8)((v>>6) & 0x3f); + zOut[3] = 0x80 + (u8)(v & 0x3f); + return 4; +} + /* ** Translate a single UTF-8 character. Return the unicode value. ** @@ -35226,7 +35467,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *zIn, int nByte, int nChar){ int n = 0; if( SQLITE_UTF16NATIVE==SQLITE_UTF16LE ) z++; - while( n=0xd8 && c<0xdc && z<=zEnd && z[0]>=0xdc && z[0]<0xe0 ) z += 2; @@ -36401,7 +36642,11 @@ SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRou } p->z = &p->zBuf[i+1]; assert( i+p->n < sizeof(p->zBuf) ); - while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; } + assert( p->n>0 ); + while( p->z[p->n-1]=='0' ){ + p->n--; + assert( p->n>0 ); + } } /* @@ -36906,7 +37151,7 @@ SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){ } /* -** Compute the absolute value of a 32-bit signed integer, of possible. Or +** Compute the absolute value of a 32-bit signed integer, if possible. 
Or
** if the integer has a value of -2147483648, return +2147483647
*/
SQLITE_PRIVATE int sqlite3AbsInt32(int x){
@@ -37187,12 +37432,19 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){
 */
 static unsigned int strHash(const char *z){
   unsigned int h = 0;
-  unsigned char c;
-  while( (c = (unsigned char)*z++)!=0 ){ /*OPTIMIZATION-IF-TRUE*/
+  while( z[0] ){ /*OPTIMIZATION-IF-TRUE*/
     /* Knuth multiplicative hashing. (Sorting & Searching, p. 510).
     ** 0x9e3779b1 is 2654435761 which is the closest prime number to
-    ** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2. */
-    h += sqlite3UpperToLower[c];
+    ** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2.
+    **
+    ** Only bits 0xdf for ASCII and bits 0xbf for EBCDIC of each octet are
+    ** hashed since the omitted bits determine the upper/lower case difference.
+    */
+#ifdef SQLITE_EBCDIC
+    h += 0xbf & (unsigned char)*(z++);
+#else
+    h += 0xdf & (unsigned char)*(z++);
+#endif
     h *= 0x9e3779b1;
   }
   return h;
@@ -37265,9 +37517,8 @@ static int rehash(Hash *pH, unsigned int new_size){
   pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht);
   memset(new_ht, 0, new_size*sizeof(struct _ht));
   for(elem=pH->first, pH->first=0; elem; elem = next_elem){
-    unsigned int h = strHash(elem->pKey) % new_size;
     next_elem = elem->next;
-    insertElement(pH, &new_ht[h], elem);
+    insertElement(pH, &new_ht[elem->h % new_size], elem);
   }
   return 1;
 }
@@ -37285,23 +37536,22 @@ static HashElem *findElementWithHash(
   HashElem *elem;                /* Used to loop thru the element list */
   unsigned int count;            /* Number of elements left to test */
   unsigned int h;                /* The computed hash */
-  static HashElem nullElement = { 0, 0, 0, 0 };
+  static HashElem nullElement = { 0, 0, 0, 0, 0 };
 
+  h = strHash(pKey);
   if( pH->ht ){   /*OPTIMIZATION-IF-TRUE*/
     struct _ht *pEntry;
-    h = strHash(pKey) % pH->htsize;
-    pEntry = &pH->ht[h];
+    pEntry = &pH->ht[h % pH->htsize];
     elem = pEntry->chain;
     count = pEntry->count;
   }else{
-    h = 0;
     elem = pH->first;
     count = pH->count;
   }
   if( pHash ) *pHash = h;
   while( count ){
     assert( elem!=0 );
-    if( sqlite3StrICmp(elem->pKey,pKey)==0 ){
+    if( h==elem->h && sqlite3StrICmp(elem->pKey,pKey)==0 ){
       return elem;
     }
     elem = elem->next;
@@ -37313,10 +37563,9 @@ static HashElem *findElementWithHash(
 /* Remove a single entry from the hash table given a pointer to that
 ** element and a hash on the element's key.
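The hash-table rework above does two things: each HashElem now caches its full 32-bit hash in a new h field (compared before the more expensive sqlite3StrICmp()), and strHash() folds case by masking each octet instead of indexing sqlite3UpperToLower[]. A standalone sketch of the masking trick, assuming ASCII (0xdf clears exactly the bit that separates 'a'..'z' from 'A'..'Z'; demoStrHash is an illustrative name, not from the patch):

#include <stdio.h>

static unsigned int demoStrHash(const char *z){
  unsigned int h = 0;
  while( z[0] ){
    /* Hash only the case-insensitive bits of each octet. */
    h += 0xdf & (unsigned char)*(z++);
    h *= 0x9e3779b1;  /* closest prime to (2**32)*golden_ratio */
  }
  return h;
}

int main(void){
  /* Both spellings land in the same bucket. */
  printf("%u\n%u\n", demoStrHash("Pragma"), demoStrHash("PRAGMA"));
  return 0;
}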
*/
-static void removeElementGivenHash(
+static void removeElement(
   Hash *pH,         /* The pH containing "elem" */
-  HashElem* elem,   /* The element to be removed from the pH */
-  unsigned int h    /* Hash value for the element */
+  HashElem *elem    /* The element to be removed from the pH */
 ){
   struct _ht *pEntry;
   if( elem->prev ){
@@ -37328,7 +37577,7 @@ static void removeElementGivenHash(
     elem->next->prev = elem->prev;
   }
   if( pH->ht ){
-    pEntry = &pH->ht[h];
+    pEntry = &pH->ht[elem->h % pH->htsize];
     if( pEntry->chain==elem ){
       pEntry->chain = elem->next;
     }
@@ -37379,7 +37628,7 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
     if( elem->data ){
       void *old_data = elem->data;
       if( data==0 ){
-        removeElementGivenHash(pH,elem,h);
+        removeElement(pH,elem);
       }else{
         elem->data = data;
         elem->pKey = pKey;
@@ -37390,15 +37639,13 @@ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){
   new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) );
   if( new_elem==0 ) return data;
   new_elem->pKey = pKey;
+  new_elem->h = h;
   new_elem->data = data;
   pH->count++;
-  if( pH->count>=10 && pH->count > 2*pH->htsize ){
-    if( rehash(pH, pH->count*2) ){
-      assert( pH->htsize>0 );
-      h = strHash(pKey) % pH->htsize;
-    }
+  if( pH->count>=5 && pH->count > 2*pH->htsize ){
+    rehash(pH, pH->count*3);
   }
-  insertElement(pH, pH->ht ? &pH->ht[h] : 0, new_elem);
+  insertElement(pH, pH->ht ? &pH->ht[new_elem->h % pH->htsize] : 0, new_elem);
   return 0;
 }
@@ -38881,6 +39128,7 @@ struct unixFile {
 #endif
 #ifdef SQLITE_ENABLE_SETLK_TIMEOUT
   unsigned iBusyTimeout;   /* Wait this many millisec on locks */
+  int bBlockOnConnect;     /* True to block for SHARED locks */
 #endif
 #if OS_VXWORKS
   struct vxworksFileId *pId;  /* Unique file ID */
@@ -40274,6 +40522,13 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){
       rc = 0;
     }
   }else{
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    if( pFile->bBlockOnConnect && pLock->l_type==F_RDLCK
+     && pLock->l_start==SHARED_FIRST && pLock->l_len==SHARED_SIZE
+    ){
+      rc = osFcntl(pFile->h, F_SETLKW, pLock);
+    }else
+#endif
     rc = osSetPosixAdvisoryLock(pFile->h, pLock, pFile);
   }
   return rc;
@@ -42635,8 +42890,9 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
 #ifdef SQLITE_ENABLE_SETLK_TIMEOUT
     case SQLITE_FCNTL_LOCK_TIMEOUT: {
       int iOld = pFile->iBusyTimeout;
+      int iNew = *(int*)pArg;
 #if SQLITE_ENABLE_SETLK_TIMEOUT==1
-      pFile->iBusyTimeout = *(int*)pArg;
+      pFile->iBusyTimeout = iNew<0 ? 0x7FFFFFFF : (unsigned)iNew;
 #elif SQLITE_ENABLE_SETLK_TIMEOUT==2
       pFile->iBusyTimeout = !!(*(int*)pArg);
 #else
@@ -42645,7 +42901,12 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
       *(int*)pArg = iOld;
       return SQLITE_OK;
     }
-#endif
+    case SQLITE_FCNTL_BLOCK_ON_CONNECT: {
+      int iNew = *(int*)pArg;
+      pFile->bBlockOnConnect = iNew;
+      return SQLITE_OK;
+    }
+#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */
 #if SQLITE_MAX_MMAP_SIZE>0
     case SQLITE_FCNTL_MMAP_SIZE: {
       i64 newLimit = *(i64*)pArg;
@@ -43618,21 +43879,20 @@ static int unixShmLock(
   /* Check that, if this is to be a blocking lock, no locks that occur later
   ** in the following list than the lock being obtained are already held:
   **
-  ** 1. Checkpointer lock (ofst==1).
-  ** 2. Write lock (ofst==0).
-  ** 3.
Read locks (ofst>=3 && ofst=3 && ofstexclMask|p->sharedMask); assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( - (ofst!=2) /* not RECOVER */ + (ofst!=2 || lockMask==0) && (ofst!=1 || lockMask==0 || lockMask==2) && (ofst!=0 || lockMask<3) && (ofst<3 || lockMask<(1<iBusyTimeout +#else +# define winFileBusyTimeout(pDbFd) 0 +#endif + /* ** The winVfsAppData structure is used for the pAppData member for all of the ** Win32 VFS variants. @@ -47478,7 +47748,7 @@ static struct win_syscall { { "FileTimeToLocalFileTime", (SYSCALL)0, 0 }, #endif -#define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(CONST FILETIME*, \ +#define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(const FILETIME*, \ LPFILETIME))aSyscall[11].pCurrent) #if SQLITE_OS_WINCE @@ -47487,7 +47757,7 @@ static struct win_syscall { { "FileTimeToSystemTime", (SYSCALL)0, 0 }, #endif -#define osFileTimeToSystemTime ((BOOL(WINAPI*)(CONST FILETIME*, \ +#define osFileTimeToSystemTime ((BOOL(WINAPI*)(const FILETIME*, \ LPSYSTEMTIME))aSyscall[12].pCurrent) { "FlushFileBuffers", (SYSCALL)FlushFileBuffers, 0 }, @@ -47593,6 +47863,12 @@ static struct win_syscall { #define osGetFullPathNameW ((DWORD(WINAPI*)(LPCWSTR,DWORD,LPWSTR, \ LPWSTR*))aSyscall[25].pCurrent) +/* +** For GetLastError(), MSDN says: +** +** Minimum supported client: Windows XP [desktop apps | UWP apps] +** Minimum supported server: Windows Server 2003 [desktop apps | UWP apps] +*/ { "GetLastError", (SYSCALL)GetLastError, 0 }, #define osGetLastError ((DWORD(WINAPI*)(VOID))aSyscall[26].pCurrent) @@ -47761,7 +48037,7 @@ static struct win_syscall { { "LockFile", (SYSCALL)0, 0 }, #endif -#ifndef osLockFile +#if !defined(osLockFile) && defined(SQLITE_WIN32_HAS_ANSI) #define osLockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ DWORD))aSyscall[47].pCurrent) #endif @@ -47825,7 +48101,7 @@ static struct win_syscall { { "SystemTimeToFileTime", (SYSCALL)SystemTimeToFileTime, 0 }, -#define osSystemTimeToFileTime ((BOOL(WINAPI*)(CONST SYSTEMTIME*, \ +#define osSystemTimeToFileTime ((BOOL(WINAPI*)(const SYSTEMTIME*, \ LPFILETIME))aSyscall[56].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT @@ -47834,7 +48110,7 @@ static struct win_syscall { { "UnlockFile", (SYSCALL)0, 0 }, #endif -#ifndef osUnlockFile +#if !defined(osUnlockFile) && defined(SQLITE_WIN32_HAS_ANSI) #define osUnlockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ DWORD))aSyscall[57].pCurrent) #endif @@ -47875,11 +48151,13 @@ static struct win_syscall { #define osCreateEventExW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,LPCWSTR, \ DWORD,DWORD))aSyscall[62].pCurrent) -#if !SQLITE_OS_WINRT +/* +** For WaitForSingleObject(), MSDN says: +** +** Minimum supported client: Windows XP [desktop apps | UWP apps] +** Minimum supported server: Windows Server 2003 [desktop apps | UWP apps] +*/ { "WaitForSingleObject", (SYSCALL)WaitForSingleObject, 0 }, -#else - { "WaitForSingleObject", (SYSCALL)0, 0 }, -#endif #define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \ DWORD))aSyscall[63].pCurrent) @@ -48026,6 +48304,97 @@ static struct win_syscall { #define osFlushViewOfFile \ ((BOOL(WINAPI*)(LPCVOID,SIZE_T))aSyscall[79].pCurrent) +/* +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined, we require CreateEvent() +** to implement blocking locks with timeouts. 
MSDN says: +** +** Minimum supported client: Windows XP [desktop apps | UWP apps] +** Minimum supported server: Windows Server 2003 [desktop apps | UWP apps] +*/ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + { "CreateEvent", (SYSCALL)CreateEvent, 0 }, +#else + { "CreateEvent", (SYSCALL)0, 0 }, +#endif + +#define osCreateEvent ( \ + (HANDLE(WINAPI*) (LPSECURITY_ATTRIBUTES,BOOL,BOOL,LPCSTR)) \ + aSyscall[80].pCurrent \ +) + +/* +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined, we require CancelIo() +** for the case where a timeout expires and a lock request must be +** cancelled. +** +** Minimum supported client: Windows XP [desktop apps | UWP apps] +** Minimum supported server: Windows Server 2003 [desktop apps | UWP apps] +*/ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + { "CancelIo", (SYSCALL)CancelIo, 0 }, +#else + { "CancelIo", (SYSCALL)0, 0 }, +#endif + +#define osCancelIo ((BOOL(WINAPI*)(HANDLE))aSyscall[81].pCurrent) + +#if defined(SQLITE_WIN32_HAS_WIDE) && defined(_WIN32) + { "GetModuleHandleW", (SYSCALL)GetModuleHandleW, 0 }, +#else + { "GetModuleHandleW", (SYSCALL)0, 0 }, +#endif + +#define osGetModuleHandleW ((HMODULE(WINAPI*)(LPCWSTR))aSyscall[82].pCurrent) + +#ifndef _WIN32 + { "getenv", (SYSCALL)getenv, 0 }, +#else + { "getenv", (SYSCALL)0, 0 }, +#endif + +#define osGetenv ((const char *(*)(const char *))aSyscall[83].pCurrent) + +#ifndef _WIN32 + { "getcwd", (SYSCALL)getcwd, 0 }, +#else + { "getcwd", (SYSCALL)0, 0 }, +#endif + +#define osGetcwd ((char*(*)(char*,size_t))aSyscall[84].pCurrent) + +#ifndef _WIN32 + { "readlink", (SYSCALL)readlink, 0 }, +#else + { "readlink", (SYSCALL)0, 0 }, +#endif + +#define osReadlink ((ssize_t(*)(const char*,char*,size_t))aSyscall[85].pCurrent) + +#ifndef _WIN32 + { "lstat", (SYSCALL)lstat, 0 }, +#else + { "lstat", (SYSCALL)0, 0 }, +#endif + +#define osLstat ((int(*)(const char*,struct stat*))aSyscall[86].pCurrent) + +#ifndef _WIN32 + { "__errno", (SYSCALL)__errno, 0 }, +#else + { "__errno", (SYSCALL)0, 0 }, +#endif + +#define osErrno (*((int*(*)(void))aSyscall[87].pCurrent)()) + +#ifndef _WIN32 + { "cygwin_conv_path", (SYSCALL)cygwin_conv_path, 0 }, +#else + { "cygwin_conv_path", (SYSCALL)0, 0 }, +#endif + +#define osCygwin_conv_path ((size_t(*)(unsigned int, \ + const void *, void *, size_t))aSyscall[88].pCurrent) + }; /* End of the overrideable system calls */ /* @@ -48199,6 +48568,7 @@ SQLITE_API int sqlite3_win32_reset_heap(){ } #endif /* SQLITE_WIN32_MALLOC */ +#ifdef _WIN32 /* ** This function outputs the specified (ANSI) string to the Win32 debugger ** (if available). @@ -48241,6 +48611,7 @@ SQLITE_API void sqlite3_win32_write_debug(const char *zBuf, int nBuf){ } #endif } +#endif /* _WIN32 */ /* ** The following routine suspends the current thread for at least ms @@ -48324,7 +48695,9 @@ SQLITE_API int sqlite3_win32_is_nt(void){ } return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; #elif SQLITE_TEST - return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; + return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2 + || osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 + ; #else /* ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are @@ -48539,6 +48912,7 @@ SQLITE_PRIVATE void sqlite3MemSetDefault(void){ } #endif /* SQLITE_WIN32_MALLOC */ +#ifdef _WIN32 /* ** Convert a UTF-8 string to Microsoft Unicode. ** @@ -48564,6 +48938,7 @@ static LPWSTR winUtf8ToUnicode(const char *zText){ } return zWideText; } +#endif /* _WIN32 */ /* ** Convert a Microsoft Unicode string to UTF-8. 
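Both the unix and win32 halves of this patch route blocking-lock behavior through two file-control opcodes: SQLITE_FCNTL_LOCK_TIMEOUT (where a negative value now means wait forever) and the newer SQLITE_FCNTL_BLOCK_ON_CONNECT. A hedged sketch of how an application would drive them through the documented sqlite3_file_control() interface, assuming a build with SQLITE_ENABLE_SETLK_TIMEOUT and a sqlite3.h recent enough to define both opcodes (they return SQLITE_NOTFOUND otherwise); demoSetLockTimeout and demoBlockOnConnect are illustrative names:

#include "sqlite3.h"

/* On success, iMs is overwritten with the previously configured timeout. */
static int demoSetLockTimeout(sqlite3 *db, int iMs){
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_LOCK_TIMEOUT, &iMs);
}

/* Ask the VFS to block when taking the SHARED lock on connect. */
static int demoBlockOnConnect(sqlite3 *db){
  int iOn = 1;
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_BLOCK_ON_CONNECT, &iOn);
}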
@@ -48598,28 +48973,29 @@ static char *winUnicodeToUtf8(LPCWSTR zWideText){ ** Space to hold the returned string is obtained from sqlite3_malloc(). */ static LPWSTR winMbcsToUnicode(const char *zText, int useAnsi){ - int nByte; + int nWideChar; LPWSTR zMbcsText; int codepage = useAnsi ? CP_ACP : CP_OEMCP; - nByte = osMultiByteToWideChar(codepage, 0, zText, -1, NULL, - 0)*sizeof(WCHAR); - if( nByte==0 ){ + nWideChar = osMultiByteToWideChar(codepage, 0, zText, -1, NULL, + 0); + if( nWideChar==0 ){ return 0; } - zMbcsText = sqlite3MallocZero( nByte*sizeof(WCHAR) ); + zMbcsText = sqlite3MallocZero( nWideChar*sizeof(WCHAR) ); if( zMbcsText==0 ){ return 0; } - nByte = osMultiByteToWideChar(codepage, 0, zText, -1, zMbcsText, - nByte); - if( nByte==0 ){ + nWideChar = osMultiByteToWideChar(codepage, 0, zText, -1, zMbcsText, + nWideChar); + if( nWideChar==0 ){ sqlite3_free(zMbcsText); zMbcsText = 0; } return zMbcsText; } +#ifdef _WIN32 /* ** Convert a Microsoft Unicode string to a multi-byte character string, ** using the ANSI or OEM code page. @@ -48647,6 +49023,7 @@ static char *winUnicodeToMbcs(LPCWSTR zWideText, int useAnsi){ } return zText; } +#endif /* _WIN32 */ /* ** Convert a multi-byte character string to UTF-8. @@ -48666,6 +49043,7 @@ static char *winMbcsToUtf8(const char *zText, int useAnsi){ return zTextUtf8; } +#ifdef _WIN32 /* ** Convert a UTF-8 string to a multi-byte character string. ** @@ -48715,6 +49093,7 @@ SQLITE_API char *sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){ #endif return winUnicodeToUtf8(zWideText); } +#endif /* _WIN32 */ /* ** This is a public wrapper for the winMbcsToUtf8() function. @@ -48732,6 +49111,7 @@ SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zText){ return winMbcsToUtf8(zText, osAreFileApisANSI()); } +#ifdef _WIN32 /* ** This is a public wrapper for the winMbcsToUtf8() function. */ @@ -48856,6 +49236,7 @@ SQLITE_API int sqlite3_win32_set_directory( ){ return sqlite3_win32_set_directory16(type, zValue); } +#endif /* _WIN32 */ /* ** The return value of winGetLastErrorMsg @@ -49404,13 +49785,98 @@ static BOOL winLockFile( ovlp.Offset = offsetLow; ovlp.OffsetHigh = offsetHigh; return osLockFileEx(*phFile, flags, 0, numBytesLow, numBytesHigh, &ovlp); +#ifdef SQLITE_WIN32_HAS_ANSI }else{ return osLockFile(*phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh); +#endif } #endif } +/* +** Lock a region of nByte bytes starting at offset offset of file hFile. +** Take an EXCLUSIVE lock if parameter bExclusive is true, or a SHARED lock +** otherwise. If nMs is greater than zero and the lock cannot be obtained +** immediately, block for that many ms before giving up. +** +** This function returns SQLITE_OK if the lock is obtained successfully. If +** some other process holds the lock, SQLITE_BUSY is returned if nMs==0, or +** SQLITE_BUSY_TIMEOUT otherwise. Or, if an error occurs, SQLITE_IOERR. 
+*/
+static int winHandleLockTimeout(
+  HANDLE hFile,
+  DWORD offset,
+  DWORD nByte,
+  int bExcl,
+  DWORD nMs
+){
+  DWORD flags = LOCKFILE_FAIL_IMMEDIATELY | (bExcl?LOCKFILE_EXCLUSIVE_LOCK:0);
+  int rc = SQLITE_OK;
+  BOOL ret;
+
+  if( !osIsNT() ){
+    ret = winLockFile(&hFile, flags, offset, 0, nByte, 0);
+  }else{
+    OVERLAPPED ovlp;
+    memset(&ovlp, 0, sizeof(OVERLAPPED));
+    ovlp.Offset = offset;
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    if( nMs!=0 ){
+      flags &= ~LOCKFILE_FAIL_IMMEDIATELY;
+    }
+    ovlp.hEvent = osCreateEvent(NULL, TRUE, FALSE, NULL);
+    if( ovlp.hEvent==NULL ){
+      return SQLITE_IOERR_LOCK;
+    }
+#endif
+
+    ret = osLockFileEx(hFile, flags, 0, nByte, 0, &ovlp);
+
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    /* If SQLITE_ENABLE_SETLK_TIMEOUT is defined, then the file-handle was
+    ** opened with FILE_FLAG_OVERLAPPED specified. In this case, the call to
+    ** LockFileEx() may fail because the request is still pending. This can
+    ** happen even if LOCKFILE_FAIL_IMMEDIATELY was specified.
+    **
+    ** If nMs is 0, then LOCKFILE_FAIL_IMMEDIATELY was set in the flags
+    ** passed to LockFileEx(). In this case, if the operation is pending,
+    ** block indefinitely until it is finished.
+    **
+    ** Otherwise, wait for up to nMs ms for the operation to finish. nMs
+    ** may be set to INFINITE.
+    */
+    if( !ret && GetLastError()==ERROR_IO_PENDING ){
+      DWORD nDelay = (nMs==0 ? INFINITE : nMs);
+      DWORD res = osWaitForSingleObject(ovlp.hEvent, nDelay);
+      if( res==WAIT_OBJECT_0 ){
+        ret = TRUE;
+      }else if( res==WAIT_TIMEOUT ){
+#if SQLITE_ENABLE_SETLK_TIMEOUT==1
+        rc = SQLITE_BUSY_TIMEOUT;
+#else
+        rc = SQLITE_BUSY;
+#endif
+      }else{
+        /* Some other error has occurred */
+        rc = SQLITE_IOERR_LOCK;
+      }
+
+      /* If it is still pending, cancel the LockFileEx() call. */
+      osCancelIo(hFile);
+    }
+
+    osCloseHandle(ovlp.hEvent);
+#endif
+  }
+
+  if( rc==SQLITE_OK && !ret ){
+    rc = SQLITE_BUSY;
+  }
+  return rc;
+}
+
 /*
 ** Unlock a file region.
 */
@@ -49435,13 +49901,23 @@ static BOOL winUnlockFile(
     ovlp.Offset = offsetLow;
     ovlp.OffsetHigh = offsetHigh;
     return osUnlockFileEx(*phFile, 0, numBytesLow, numBytesHigh, &ovlp);
+#ifdef SQLITE_WIN32_HAS_ANSI
   }else{
     return osUnlockFile(*phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh);
+#endif
   }
 #endif
 }
 
+/*
+** Remove an nByte lock starting at offset iOff from HANDLE h.
+*/
+static int winHandleUnlock(HANDLE h, int iOff, int nByte){
+  BOOL ret = winUnlockFile(&h, iOff, 0, nByte, 0);
+  return (ret ? SQLITE_OK : SQLITE_IOERR_UNLOCK);
+}
+
 /*****************************************************************************
 ** The next group of routines implement the I/O methods specified
 ** by the sqlite3_io_methods object.
 */
@@ -49455,66 +49931,70 @@ static BOOL winUnlockFile(
 #endif
 
 /*
-** Move the current position of the file handle passed as the first
-** argument to offset iOffset within the file. If successful, return 0.
-** Otherwise, set pFile->lastErrno and return non-zero.
+** Seek the file handle h to offset iOffset within the file.
+**
+** If successful, return SQLITE_OK. Or, if an error occurs, return an SQLite
+** error code.
 */
-static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){
+static int winHandleSeek(HANDLE h, sqlite3_int64 iOffset){
+  int rc = SQLITE_OK;              /* Return value */
+
 #if !SQLITE_OS_WINRT
   LONG upperBits;                  /* Most sig. 32 bits of new offset */
   LONG lowerBits;                  /* Least sig.
32 bits of new offset */ DWORD dwRet; /* Value returned by SetFilePointer() */ - DWORD lastErrno; /* Value returned by GetLastError() */ - - OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset)); upperBits = (LONG)((iOffset>>32) & 0x7fffffff); lowerBits = (LONG)(iOffset & 0xffffffff); + dwRet = osSetFilePointer(h, lowerBits, &upperBits, FILE_BEGIN); + /* API oddity: If successful, SetFilePointer() returns a dword ** containing the lower 32-bits of the new file-offset. Or, if it fails, ** it returns INVALID_SET_FILE_POINTER. However according to MSDN, ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine ** whether an error has actually occurred, it is also necessary to call - ** GetLastError(). - */ - dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); - - if( (dwRet==INVALID_SET_FILE_POINTER - && ((lastErrno = osGetLastError())!=NO_ERROR)) ){ - pFile->lastErrno = lastErrno; - winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, - "winSeekFile", pFile->zPath); - OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); - return 1; + ** GetLastError(). */ + if( dwRet==INVALID_SET_FILE_POINTER ){ + DWORD lastErrno = osGetLastError(); + if( lastErrno!=NO_ERROR ){ + rc = SQLITE_IOERR_SEEK; + } } - - OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); - return 0; #else - /* - ** Same as above, except that this implementation works for WinRT. - */ - + /* This implementation works for WinRT. */ LARGE_INTEGER x; /* The new offset */ BOOL bRet; /* Value returned by SetFilePointerEx() */ x.QuadPart = iOffset; - bRet = osSetFilePointerEx(pFile->h, x, 0, FILE_BEGIN); + bRet = osSetFilePointerEx(h, x, 0, FILE_BEGIN); if(!bRet){ - pFile->lastErrno = osGetLastError(); - winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, - "winSeekFile", pFile->zPath); - OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); - return 1; + rc = SQLITE_IOERR_SEEK; } - - OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); - return 0; #endif + + OSTRACE(("SEEK file=%p, offset=%lld rc=%s\n", h, iOffset, sqlite3ErrName(rc))); + return rc; } +/* +** Move the current position of the file handle passed as the first +** argument to offset iOffset within the file. If successful, return 0. +** Otherwise, set pFile->lastErrno and return non-zero. +*/ +static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){ + int rc; + + rc = winHandleSeek(pFile->h, iOffset); + if( rc!=SQLITE_OK ){ + pFile->lastErrno = osGetLastError(); + winLogError(rc, pFile->lastErrno, "winSeekFile", pFile->zPath); + } + return rc; +} + + #if SQLITE_MAX_MMAP_SIZE>0 /* Forward references to VFS helper methods used for memory mapped files */ static int winMapfile(winFile*, sqlite3_int64); @@ -49774,6 +50254,60 @@ static int winWrite( return SQLITE_OK; } +/* +** Truncate the file opened by handle h to nByte bytes in size. +*/ +static int winHandleTruncate(HANDLE h, sqlite3_int64 nByte){ + int rc = SQLITE_OK; /* Return code */ + rc = winHandleSeek(h, nByte); + if( rc==SQLITE_OK ){ + if( 0==osSetEndOfFile(h) ){ + rc = SQLITE_IOERR_TRUNCATE; + } + } + return rc; +} + +/* +** Determine the size in bytes of the file opened by the handle passed as +** the first argument. 
+*/ +static int winHandleSize(HANDLE h, sqlite3_int64 *pnByte){ + int rc = SQLITE_OK; + +#if SQLITE_OS_WINRT + FILE_STANDARD_INFO info; + BOOL b; + b = osGetFileInformationByHandleEx(h, FileStandardInfo, &info, sizeof(info)); + if( b ){ + *pnByte = info.EndOfFile.QuadPart; + }else{ + rc = SQLITE_IOERR_FSTAT; + } +#else + DWORD upperBits = 0; + DWORD lowerBits = 0; + + assert( pnByte ); + lowerBits = osGetFileSize(h, &upperBits); + *pnByte = (((sqlite3_int64)upperBits)<<32) + lowerBits; + if( lowerBits==INVALID_FILE_SIZE && osGetLastError()!=NO_ERROR ){ + rc = SQLITE_IOERR_FSTAT; + } +#endif + + return rc; +} + +/* +** Close the handle passed as the only argument. +*/ +static void winHandleClose(HANDLE h){ + if( h!=INVALID_HANDLE_VALUE ){ + osCloseHandle(h); + } +} + /* ** Truncate an open file to a specified size */ @@ -50029,8 +50563,9 @@ static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){ ** Different API routines are called depending on whether or not this ** is Win9x or WinNT. */ -static int winGetReadLock(winFile *pFile){ +static int winGetReadLock(winFile *pFile, int bBlock){ int res; + DWORD mask = ~(bBlock ? LOCKFILE_FAIL_IMMEDIATELY : 0); OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); if( osIsNT() ){ #if SQLITE_OS_WINCE @@ -50040,7 +50575,7 @@ static int winGetReadLock(winFile *pFile){ */ res = winceLockFile(&pFile->h, SHARED_FIRST, 0, 1, 0); #else - res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS, SHARED_FIRST, 0, + res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS&mask, SHARED_FIRST, 0, SHARED_SIZE, 0); #endif } @@ -50049,7 +50584,7 @@ static int winGetReadLock(winFile *pFile){ int lk; sqlite3_randomness(sizeof(lk), &lk); pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1)); - res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, + res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS&mask, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); } #endif @@ -50144,46 +50679,62 @@ static int winLock(sqlite3_file *id, int locktype){ assert( locktype!=PENDING_LOCK ); assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); - /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or + /* Lock the PENDING_LOCK byte if we need to acquire an EXCLUSIVE lock or ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of ** the PENDING_LOCK byte is temporary. */ newLocktype = pFile->locktype; - if( pFile->locktype==NO_LOCK - || (locktype==EXCLUSIVE_LOCK && pFile->locktype<=RESERVED_LOCK) + if( locktype==SHARED_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktype==RESERVED_LOCK) ){ int cnt = 3; - while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, - PENDING_BYTE, 0, 1, 0))==0 ){ + + /* Flags for the LockFileEx() call. This should be an exclusive lock if + ** this call is to obtain EXCLUSIVE, or a shared lock if this call is to + ** obtain SHARED. */ + int flags = LOCKFILE_FAIL_IMMEDIATELY; + if( locktype==EXCLUSIVE_LOCK ){ + flags |= LOCKFILE_EXCLUSIVE_LOCK; + } + while( cnt>0 ){ /* Try 3 times to get the pending lock. This is needed to work ** around problems caused by indexing and/or anti-virus software on ** Windows systems. + ** ** If you are using this code as a model for alternative VFSes, do not - ** copy this retry logic. It is a hack intended for Windows only. - */ + ** copy this retry logic. It is a hack intended for Windows only. 
*/ + res = winLockFile(&pFile->h, flags, PENDING_BYTE, 0, 1, 0); + if( res ) break; + lastErrno = osGetLastError(); OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", - pFile->h, cnt, res)); + pFile->h, cnt, res + )); + if( lastErrno==ERROR_INVALID_HANDLE ){ pFile->lastErrno = lastErrno; rc = SQLITE_IOERR_LOCK; OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", - pFile->h, cnt, sqlite3ErrName(rc))); + pFile->h, cnt, sqlite3ErrName(rc) + )); return rc; } - if( cnt ) sqlite3_win32_sleep(1); + + cnt--; + if( cnt>0 ) sqlite3_win32_sleep(1); } gotPendingLock = res; - if( !res ){ - lastErrno = osGetLastError(); - } } /* Acquire a shared lock */ if( locktype==SHARED_LOCK && res ){ assert( pFile->locktype==NO_LOCK ); - res = winGetReadLock(pFile); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + res = winGetReadLock(pFile, pFile->bBlockOnConnect); +#else + res = winGetReadLock(pFile, 0); +#endif if( res ){ newLocktype = SHARED_LOCK; }else{ @@ -50221,7 +50772,7 @@ static int winLock(sqlite3_file *id, int locktype){ newLocktype = EXCLUSIVE_LOCK; }else{ lastErrno = osGetLastError(); - winGetReadLock(pFile); + winGetReadLock(pFile, 0); } } @@ -50301,7 +50852,7 @@ static int winUnlock(sqlite3_file *id, int locktype){ type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){ + if( locktype==SHARED_LOCK && !winGetReadLock(pFile, 0) ){ /* This should never happen. We should always be able to ** reacquire the read lock */ rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(), @@ -50511,6 +51062,28 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ return rc; } #endif + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + case SQLITE_FCNTL_LOCK_TIMEOUT: { + int iOld = pFile->iBusyTimeout; + int iNew = *(int*)pArg; +#if SQLITE_ENABLE_SETLK_TIMEOUT==1 + pFile->iBusyTimeout = (iNew < 0) ? INFINITE : (DWORD)iNew; +#elif SQLITE_ENABLE_SETLK_TIMEOUT==2 + pFile->iBusyTimeout = (DWORD)(!!iNew); +#else +# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2" +#endif + *(int*)pArg = iOld; + return SQLITE_OK; + } + case SQLITE_FCNTL_BLOCK_ON_CONNECT: { + int iNew = *(int*)pArg; + pFile->bBlockOnConnect = iNew; + return SQLITE_OK; + } +#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ + } OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); return SQLITE_NOTFOUND; @@ -50591,23 +51164,27 @@ static int winShmMutexHeld(void) { ** ** The following fields are read-only after the object is created: ** -** fid ** zFilename ** ** Either winShmNode.mutex must be held or winShmNode.nRef==0 and ** winShmMutexHeld() is true when reading or writing any other field ** in this structure. ** +** File-handle hSharedShm is used to (a) take the DMS lock, (b) truncate +** the *-shm file if the DMS-locking protocol demands it, and (c) map +** regions of the *-shm file into memory using MapViewOfFile() or +** similar. Other locks are taken by individual clients using the +** winShm.hShm handles. 
*/ struct winShmNode { sqlite3_mutex *mutex; /* Mutex to access this object */ char *zFilename; /* Name of the file */ - winFile hFile; /* File handle from winOpen */ + HANDLE hSharedShm; /* File handle open on zFilename */ + int isUnlocked; /* DMS lock has not yet been obtained */ + int isReadonly; /* True if read-only */ int szRegion; /* Size of shared-memory regions */ int nRegion; /* Size of array apRegion */ - u8 isReadonly; /* True if read-only */ - u8 isUnlocked; /* True if no DMS lock held */ struct ShmRegion { HANDLE hMap; /* File handle from CreateFileMapping */ @@ -50616,7 +51193,6 @@ struct winShmNode { DWORD lastErrno; /* The Windows errno from the last I/O error */ int nRef; /* Number of winShm objects pointing to this */ - winShm *pFirst; /* All winShm objects pointing to this */ winShmNode *pNext; /* Next in list of all winShmNode objects */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 nextShmId; /* Next available winShm.id value */ @@ -50632,23 +51208,15 @@ static winShmNode *winShmNodeList = 0; /* ** Structure used internally by this VFS to record the state of an -** open shared memory connection. -** -** The following fields are initialized when this object is created and -** are read-only thereafter: -** -** winShm.pShmNode -** winShm.id -** -** All other fields are read/write. The winShm.pShmNode->mutex must be held -** while accessing any read/write fields. +** open shared memory connection. There is one such structure for each +** winFile open on a wal mode database. */ struct winShm { winShmNode *pShmNode; /* The underlying winShmNode object */ - winShm *pNext; /* Next winShm with the same winShmNode */ - u8 hasMutex; /* True if holding the winShmNode mutex */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ + HANDLE hShm; /* File-handle on *-shm file. For locking. */ + int bReadonly; /* True if hShm is opened read-only */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 id; /* Id of this connection with its winShmNode */ #endif @@ -50660,50 +51228,6 @@ struct winShm { #define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ -/* -** Apply advisory locks for all n bytes beginning at ofst. 
-*/ -#define WINSHM_UNLCK 1 -#define WINSHM_RDLCK 2 -#define WINSHM_WRLCK 3 -static int winShmSystemLock( - winShmNode *pFile, /* Apply locks to this open shared-memory segment */ - int lockType, /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */ - int ofst, /* Offset to first byte to be locked/unlocked */ - int nByte /* Number of bytes to lock or unlock */ -){ - int rc = 0; /* Result code form Lock/UnlockFileEx() */ - - /* Access to the winShmNode object is serialized by the caller */ - assert( pFile->nRef==0 || sqlite3_mutex_held(pFile->mutex) ); - - OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n", - pFile->hFile.h, lockType, ofst, nByte)); - - /* Release/Acquire the system-level lock */ - if( lockType==WINSHM_UNLCK ){ - rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); - }else{ - /* Initialize the locking parameters */ - DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; - if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; - rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); - } - - if( rc!= 0 ){ - rc = SQLITE_OK; - }else{ - pFile->lastErrno = osGetLastError(); - rc = SQLITE_BUSY; - } - - OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", - pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" : - "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); - - return rc; -} - /* Forward references to VFS methods */ static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); static int winDelete(sqlite3_vfs *,const char*,int); @@ -50735,11 +51259,7 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); UNUSED_VARIABLE_VALUE(bRc); } - if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ - SimulateIOErrorBenign(1); - winClose((sqlite3_file *)&p->hFile); - SimulateIOErrorBenign(0); - } + winHandleClose(p->hSharedShm); if( deleteFlag ){ SimulateIOErrorBenign(1); sqlite3BeginBenignMalloc(); @@ -50757,42 +51277,239 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ } /* -** The DMS lock has not yet been taken on shm file pShmNode. Attempt to -** take it now. Return SQLITE_OK if successful, or an SQLite error -** code otherwise. -** -** If the DMS cannot be locked because this is a readonly_shm=1 -** connection and no other process already holds a lock, return -** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1. +** The DMS lock has not yet been taken on the shm file associated with +** pShmNode. Take the lock. Truncate the *-shm file if required. +** Return SQLITE_OK if successful, or an SQLite error code otherwise. */ -static int winLockSharedMemory(winShmNode *pShmNode){ - int rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1); +static int winLockSharedMemory(winShmNode *pShmNode, DWORD nMs){ + HANDLE h = pShmNode->hSharedShm; + int rc = SQLITE_OK; + assert( sqlite3_mutex_held(pShmNode->mutex) ); + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 1, 0); if( rc==SQLITE_OK ){ + /* We have an EXCLUSIVE lock on the DMS byte. This means that this + ** is the first process to open the file. Truncate it to zero bytes + ** in this case. 
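In outline, the DMS ("deadman switch") protocol implemented by winLockSharedMemory() above is: probe the DMS byte with an exclusive lock; success means this is the first connection, so the *-shm file is reset; then every connection downgrades to a shared lock on that byte which it holds while attached. A compilable sketch against the raw Win32 API (dmsAttach and dmsOfst are illustrative names, not from the patch; error handling elided):

#include <windows.h>
#include <string.h>

static int dmsAttach(HANDLE hShm, DWORD dmsOfst){
  OVERLAPPED o;
  memset(&o, 0, sizeof(o));
  o.Offset = dmsOfst;
  /* Exclusive probe: success => first connection => reset the file. */
  if( LockFileEx(hShm, LOCKFILE_EXCLUSIVE_LOCK|LOCKFILE_FAIL_IMMEDIATELY,
                 0, 1, 0, &o) ){
    SetFilePointer(hShm, 0, 0, FILE_BEGIN);
    SetEndOfFile(hShm);               /* truncate to zero bytes */
    UnlockFileEx(hShm, 0, 1, 0, &o);  /* drop the exclusive probe */
  }
  /* All connections then hold SHARED on the DMS byte while attached. */
  return LockFileEx(hShm, 0, 0, 1, 0, &o) ? 0 : -1;
}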
*/ if( pShmNode->isReadonly ){ - pShmNode->isUnlocked = 1; - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return SQLITE_READONLY_CANTINIT; - }else if( winTruncate((sqlite3_file*)&pShmNode->hFile, 0) ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), - "winLockSharedMemory", pShmNode->zFilename); + rc = SQLITE_READONLY_CANTINIT; + }else{ + rc = winHandleTruncate(h, 0); } + + /* Release the EXCLUSIVE lock acquired above. */ + winUnlockFile(&h, WIN_SHM_DMS, 0, 1, 0); + }else if( (rc & 0xFF)==SQLITE_BUSY ){ + rc = SQLITE_OK; } if( rc==SQLITE_OK ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + /* Take a SHARED lock on the DMS byte. */ + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 0, nMs); + if( rc==SQLITE_OK ){ + pShmNode->isUnlocked = 0; + } } - return winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1); + return rc; } + /* -** Open the shared-memory area associated with database file pDbFd. +** Convert a UTF-8 filename into whatever form the underlying +** operating system wants filenames in. Space to hold the result +** is obtained from malloc and must be freed by the calling +** function +** +** On Cygwin, 3 possible input forms are accepted: +** - If the filename starts with ":/" or ":\", +** it is converted to UTF-16 as-is. +** - If the filename contains '/', it is assumed to be a +** Cygwin absolute path, it is converted to a win32 +** absolute path in UTF-16. +** - Otherwise it must be a filename only, the win32 filename +** is returned in UTF-16. +** Note: If the function cygwin_conv_path() fails, only +** UTF-8 -> UTF-16 conversion will be done. This can only +** happen when the file path >32k, in which case winUtf8ToUnicode() +** will fail too. +*/ +static void *winConvertFromUtf8Filename(const char *zFilename){ + void *zConverted = 0; + if( osIsNT() ){ +#ifdef __CYGWIN__ + int nChar; + LPWSTR zWideFilename; + + if( osCygwin_conv_path && !(winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2])) ){ + i64 nByte; + int convertflag = CCP_POSIX_TO_WIN_W; + if( !strchr(zFilename, '/') ) convertflag |= CCP_RELATIVE; + nByte = (i64)osCygwin_conv_path(convertflag, + zFilename, 0, 0); + if( nByte>0 ){ + zConverted = sqlite3MallocZero(12+(u64)nByte); + if ( zConverted==0 ){ + return zConverted; + } + zWideFilename = zConverted; + /* Filenames should be prefixed, except when converted + * full path already starts with "\\?\". */ + if( osCygwin_conv_path(convertflag, zFilename, + zWideFilename+4, nByte)==0 ){ + if( (convertflag&CCP_RELATIVE) ){ + memmove(zWideFilename, zWideFilename+4, nByte); + }else if( memcmp(zWideFilename+4, L"\\\\", 4) ){ + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( zWideFilename[6]!='?' 
){ + memmove(zWideFilename+6, zWideFilename+4, nByte); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + }else{ + memmove(zWideFilename, zWideFilename+4, nByte); + } + return zConverted; + } + sqlite3_free(zConverted); + } + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); + if( nChar==0 ){ + return 0; + } + zWideFilename = sqlite3MallocZero( nChar*sizeof(WCHAR)+12 ); + if( zWideFilename==0 ){ + return 0; + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, + zWideFilename, nChar); + if( nChar==0 ){ + sqlite3_free(zWideFilename); + zWideFilename = 0; + }else if( nChar>MAX_PATH + && winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2]) ){ + memmove(zWideFilename+4, zWideFilename, nChar*sizeof(WCHAR)); + zWideFilename[2] = '\\'; + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( nChar>MAX_PATH + && winIsDirSep(zFilename[0]) && winIsDirSep(zFilename[1]) + && zFilename[2] != '?' ){ + memmove(zWideFilename+6, zWideFilename, nChar*sizeof(WCHAR)); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + } + zConverted = zWideFilename; +#else + zConverted = winUtf8ToUnicode(zFilename); +#endif /* __CYGWIN__ */ + } +#if defined(SQLITE_WIN32_HAS_ANSI) && defined(_WIN32) + else{ + zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); + } +#endif + /* caller will handle out of memory */ + return zConverted; +} + +/* +** This function is used to open a handle on a *-shm file. ** -** When opening a new shared-memory file, if no other instances of that -** file are currently open, in this process or in other processes, then -** the file must be truncated to zero length or have its header cleared. +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined at build time, then the file +** is opened with FILE_FLAG_OVERLAPPED specified. If not, it is not. +*/ +static int winHandleOpen( + const char *zUtf8, /* File to open */ + int *pbReadonly, /* IN/OUT: True for readonly handle */ + HANDLE *ph /* OUT: New HANDLE for file */ +){ + int rc = SQLITE_OK; + void *zConverted = 0; + int bReadonly = *pbReadonly; + HANDLE h = INVALID_HANDLE_VALUE; + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + const DWORD flag_overlapped = FILE_FLAG_OVERLAPPED; +#else + const DWORD flag_overlapped = 0; +#endif + + /* Convert the filename to the system encoding. */ + zConverted = winConvertFromUtf8Filename(zUtf8); + if( zConverted==0 ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8)); + rc = SQLITE_IOERR_NOMEM_BKPT; + goto winopenfile_out; + } + + /* Ensure the file we are trying to open is not actually a directory. */ + if( winIsDir(zConverted) ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8)); + rc = SQLITE_CANTOPEN_ISDIR; + goto winopenfile_out; + } + + /* TODO: platforms. + ** TODO: retry-on-ioerr. + */ + if( osIsNT() ){ +#if SQLITE_OS_WINRT + CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; + memset(&extendedParameters, 0, sizeof(extendedParameters)); + extendedParameters.dwSize = sizeof(extendedParameters); + extendedParameters.dwFileAttributes = FILE_ATTRIBUTE_NORMAL; + extendedParameters.dwFileFlags = flag_overlapped; + extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; + h = osCreateFile2((LPCWSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)),/* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + OPEN_ALWAYS, /* dwCreationDisposition */ + &extendedParameters + ); +#else + h = osCreateFileW((LPCWSTR)zConverted, /* lpFileName */ + (GENERIC_READ | (bReadonly ? 
0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + }else{ + /* Due to pre-processor directives earlier in this file, + ** SQLITE_WIN32_HAS_ANSI is always defined if osIsNT() is false. */ +#ifdef SQLITE_WIN32_HAS_ANSI + h = osCreateFileA((LPCSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + } + + if( h==INVALID_HANDLE_VALUE ){ + if( bReadonly==0 ){ + bReadonly = 1; + rc = winHandleOpen(zUtf8, &bReadonly, &h); + }else{ + rc = SQLITE_CANTOPEN_BKPT; + } + } + + winopenfile_out: + sqlite3_free(zConverted); + *pbReadonly = bReadonly; + *ph = h; + return rc; +} + + +/* +** Open the shared-memory area associated with database file pDbFd. */ static int winOpenSharedMemory(winFile *pDbFd){ struct winShm *p; /* The connection to be opened */ @@ -50804,98 +51521,83 @@ static int winOpenSharedMemory(winFile *pDbFd){ assert( pDbFd->pShm==0 ); /* Not previously opened */ /* Allocate space for the new sqlite3_shm object. Also speculatively - ** allocate space for a new winShmNode and filename. - */ + ** allocate space for a new winShmNode and filename. */ p = sqlite3MallocZero( sizeof(*p) ); if( p==0 ) return SQLITE_IOERR_NOMEM_BKPT; nName = sqlite3Strlen30(pDbFd->zPath); - pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); + pNew = sqlite3MallocZero( sizeof(*pShmNode) + (i64)nName + 17 ); if( pNew==0 ){ sqlite3_free(p); return SQLITE_IOERR_NOMEM_BKPT; } pNew->zFilename = (char*)&pNew[1]; + pNew->hSharedShm = INVALID_HANDLE_VALUE; + pNew->isUnlocked = 1; sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); + /* Open a file-handle on the *-shm file for this connection. This file-handle + ** is only used for locking. The mapping of the *-shm file is created using + ** the shared file handle in winShmNode.hSharedShm. */ + p->bReadonly = sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0); + rc = winHandleOpen(pNew->zFilename, &p->bReadonly, &p->hShm); + /* Look to see if there is an existing winShmNode that can be used. - ** If no matching winShmNode currently exists, create a new one. - */ + ** If no matching winShmNode currently exists, then create a new one. */ winShmEnterMutex(); for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ /* TBD need to come up with better match here. Perhaps - ** use FILE_ID_BOTH_DIR_INFO Structure. - */ + ** use FILE_ID_BOTH_DIR_INFO Structure. */ if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; } - if( pShmNode ){ - sqlite3_free(pNew); - }else{ - int inFlags = SQLITE_OPEN_WAL; - int outFlags = 0; - + if( pShmNode==0 ){ pShmNode = pNew; - pNew = 0; - ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; - pShmNode->pNext = winShmNodeList; - winShmNodeList = pShmNode; + /* Allocate a mutex for this winShmNode object, if one is required. 
*/ if( sqlite3GlobalConfig.bCoreMutex ){ pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_IOERR_NOMEM_BKPT; - goto shm_open_err; - } + if( pShmNode->mutex==0 ) rc = SQLITE_IOERR_NOMEM_BKPT; } - if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ - inFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; - }else{ - inFlags |= SQLITE_OPEN_READONLY; - } - rc = winOpen(pDbFd->pVfs, pShmNode->zFilename, - (sqlite3_file*)&pShmNode->hFile, - inFlags, &outFlags); - if( rc!=SQLITE_OK ){ - rc = winLogError(rc, osGetLastError(), "winOpenShm", - pShmNode->zFilename); - goto shm_open_err; + /* Open a file-handle to use for mappings, and for the DMS lock. */ + if( rc==SQLITE_OK ){ + HANDLE h = INVALID_HANDLE_VALUE; + pShmNode->isReadonly = p->bReadonly; + rc = winHandleOpen(pNew->zFilename, &pShmNode->isReadonly, &h); + pShmNode->hSharedShm = h; } - if( outFlags==SQLITE_OPEN_READONLY ) pShmNode->isReadonly = 1; - rc = winLockSharedMemory(pShmNode); - if( rc!=SQLITE_OK && rc!=SQLITE_READONLY_CANTINIT ) goto shm_open_err; + /* If successful, link the new winShmNode into the global list. If an + ** error occurred, free the object. */ + if( rc==SQLITE_OK ){ + pShmNode->pNext = winShmNodeList; + winShmNodeList = pShmNode; + pNew = 0; + }else{ + sqlite3_mutex_free(pShmNode->mutex); + if( pShmNode->hSharedShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(pShmNode->hSharedShm); + } + } } - /* Make the new connection a child of the winShmNode */ - p->pShmNode = pShmNode; + /* If no error has occurred, link the winShm object to the winShmNode and + ** the winShm to pDbFd. */ + if( rc==SQLITE_OK ){ + p->pShmNode = pShmNode; + pShmNode->nRef++; #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) - p->id = pShmNode->nextShmId++; + p->id = pShmNode->nextShmId++; #endif - pShmNode->nRef++; - pDbFd->pShm = p; - winShmLeaveMutex(); - - /* The reference count on pShmNode has already been incremented under - ** the cover of the winShmEnterMutex() mutex and the pointer from the - ** new (struct winShm) object to the pShmNode has been set. All that is - ** left to do is to link the new object into the linked list starting - ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex - ** mutex. 
- */ - sqlite3_mutex_enter(pShmNode->mutex); - p->pNext = pShmNode->pFirst; - pShmNode->pFirst = p; - sqlite3_mutex_leave(pShmNode->mutex); - return rc; + pDbFd->pShm = p; + }else if( p ){ + winHandleClose(p->hShm); + sqlite3_free(p); + } - /* Jump here on any error */ -shm_open_err: - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ - sqlite3_free(p); - sqlite3_free(pNew); + assert( rc!=SQLITE_OK || pShmNode->isUnlocked==0 || pShmNode->nRegion==0 ); winShmLeaveMutex(); + sqlite3_free(pNew); return rc; } @@ -50910,27 +51612,19 @@ static int winShmUnmap( winFile *pDbFd; /* Database holding shared-memory */ winShm *p; /* The connection to be closed */ winShmNode *pShmNode; /* The underlying shared-memory file */ - winShm **pp; /* For looping over sibling connections */ pDbFd = (winFile*)fd; p = pDbFd->pShm; if( p==0 ) return SQLITE_OK; - pShmNode = p->pShmNode; - - /* Remove connection p from the set of connections associated - ** with pShmNode */ - sqlite3_mutex_enter(pShmNode->mutex); - for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} - *pp = p->pNext; + if( p->hShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(p->hShm); + } - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - sqlite3_mutex_leave(pShmNode->mutex); + pShmNode = p->pShmNode; + winShmEnterMutex(); /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too */ - winShmEnterMutex(); + ** shared-memory file, too. */ assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ @@ -50938,6 +51632,9 @@ static int winShmUnmap( } winShmLeaveMutex(); + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; return SQLITE_OK; } @@ -50952,10 +51649,9 @@ static int winShmLock( ){ winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ winShm *p = pDbFd->pShm; /* The shared memory being locked */ - winShm *pX; /* For looping over all siblings */ winShmNode *pShmNode; int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ + u16 mask = (u16)((1U<<(ofst+n)) - (1U<pShmNode; @@ -50969,85 +51665,81 @@ static int winShmLock( || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); - mask = (u16)((1U<<(ofst+n)) - (1U<1 || mask==(1<mutex); - if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ - - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } + /* Check that, if this to be a blocking lock, no locks that occur later + ** in the following list than the lock being obtained are already held: + ** + ** 1. Recovery lock (ofst==2). + ** 2. Checkpointer lock (ofst==1). + ** 3. Write lock (ofst==0). + ** 4. 
Read locks (ofst>=3 && ofstexclMask|p->sharedMask); + assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( + (ofst!=2 || lockMask==0) + && (ofst!=1 || lockMask==0 || lockMask==2) + && (ofst!=0 || lockMask<3) + && (ofst<3 || lockMask<(1<exclMask & mask) + ); + if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask)) + || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) + || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) + ){ - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ + if( flags & SQLITE_SHM_UNLOCK ){ + /* Case (a) - unlock. */ - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } + assert( (p->exclMask & p->sharedMask)==0 ); + assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); + assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ - rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - } + rc = winHandleUnlock(p->hShm, ofst+WIN_SHM_BASE, n); - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; + /* If successful, also clear the bits in sharedMask/exclMask */ + if( rc==SQLITE_OK ){ + p->exclMask = (p->exclMask & ~mask); + p->sharedMask = (p->sharedMask & ~mask); } - } - - /* Get the exclusive locks at the system level. Then if successful - ** also mark the local connection as being locked. - */ - if( rc==SQLITE_OK ){ - rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n); + }else{ + int bExcl = ((flags & SQLITE_SHM_EXCLUSIVE) ? 1 : 0); + DWORD nMs = winFileBusyTimeout(pDbFd); + rc = winHandleLockTimeout(p->hShm, ofst+WIN_SHM_BASE, n, bExcl, nMs); if( rc==SQLITE_OK ){ - assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; + if( bExcl ){ + p->exclMask = (p->exclMask | mask); + }else{ + p->sharedMask = (p->sharedMask | mask); + } } } } - sqlite3_mutex_leave(pShmNode->mutex); - OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n", - osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, - sqlite3ErrName(rc))); + + OSTRACE(( + "SHM-LOCK(%d,%d,%d) pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x," + " rc=%s\n", + ofst, n, flags, + osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, + sqlite3ErrName(rc)) + ); return rc; } @@ -51109,13 +51801,15 @@ static int winShmMap( sqlite3_mutex_enter(pShmNode->mutex); if( pShmNode->isUnlocked ){ - rc = winLockSharedMemory(pShmNode); + /* Take the DMS lock. 
*/ + assert( pShmNode->nRegion==0 ); + rc = winLockSharedMemory(pShmNode, winFileBusyTimeout(pDbFd)); if( rc!=SQLITE_OK ) goto shmpage_out; - pShmNode->isUnlocked = 0; } - assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); + assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); if( pShmNode->nRegion<=iRegion ){ + HANDLE hShared = pShmNode->hSharedShm; struct ShmRegion *apNew; /* New aRegion[] array */ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ sqlite3_int64 sz; /* Current size of wal-index file */ @@ -51126,10 +51820,9 @@ static int winShmMap( ** Check to see if it has been allocated (i.e. if the wal-index file is ** large enough to contain the requested region). */ - rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); + rc = winHandleSize(hShared, &sz); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap1", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap1", pDbFd->zPath); goto shmpage_out; } @@ -51138,19 +51831,17 @@ static int winShmMap( ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned. ** ** Alternatively, if isWrite is non-zero, use ftruncate() to allocate - ** the requested memory region. - */ + ** the requested memory region. */ if( !isWrite ) goto shmpage_out; - rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte); + rc = winHandleTruncate(hShared, nByte); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap2", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap2", pDbFd->zPath); goto shmpage_out; } } /* Map the requested memory region into this processes address space. */ - apNew = (struct ShmRegion *)sqlite3_realloc64( + apNew = (struct ShmRegion*)sqlite3_realloc64( pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) ); if( !apNew ){ @@ -51169,18 +51860,13 @@ static int winShmMap( void *pMap = 0; /* Mapped memory region */ #if SQLITE_OS_WINRT - hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, - NULL, protect, nByte, NULL - ); + hMap = osCreateFileMappingFromApp(hShared, NULL, protect, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_WIDE) - hMap = osCreateFileMappingW(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingW(hShared, NULL, protect, 0, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA - hMap = osCreateFileMappingA(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingA(hShared, NULL, protect, 0, nByte, NULL); #endif + OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", osGetCurrentProcessId(), pShmNode->nRegion, nByte, hMap ? "ok" : "failed")); @@ -51223,7 +51909,9 @@ static int winShmMap( }else{ *pp = 0; } - if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; + if( pShmNode->isReadonly && rc==SQLITE_OK ){ + rc = SQLITE_READONLY; + } sqlite3_mutex_leave(pShmNode->mutex); return rc; } @@ -51543,47 +52231,6 @@ static winVfsAppData winNolockAppData = { ** sqlite3_vfs object. */ -#if defined(__CYGWIN__) -/* -** Convert a filename from whatever the underlying operating system -** supports for filenames into UTF-8. Space to hold the result is -** obtained from malloc and must be freed by the calling function. 
-*/ -static char *winConvertToUtf8Filename(const void *zFilename){ - char *zConverted = 0; - if( osIsNT() ){ - zConverted = winUnicodeToUtf8(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} -#endif - -/* -** Convert a UTF-8 filename into whatever form the underlying -** operating system wants filenames in. Space to hold the result -** is obtained from malloc and must be freed by the calling -** function. -*/ -static void *winConvertFromUtf8Filename(const char *zFilename){ - void *zConverted = 0; - if( osIsNT() ){ - zConverted = winUtf8ToUnicode(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} - /* ** This function returns non-zero if the specified UTF-8 string buffer ** ends with a directory separator character or one was successfully @@ -51596,7 +52243,14 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){ if( winIsDirSep(zBuf[nLen-1]) ){ return 1; }else if( nLen+1mxPathname; nBuf = nMax + 2; + nMax = pVfs->mxPathname; + nBuf = 2 + (i64)nMax; zBuf = sqlite3MallocZero( nBuf ); if( !zBuf ){ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n")); @@ -51673,7 +52328,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ } #if defined(__CYGWIN__) - else{ + else if( osGetenv!=NULL ){ static const char *azDirs[] = { 0, /* getenv("SQLITE_TMPDIR") */ 0, /* getenv("TMPDIR") */ @@ -51689,11 +52344,11 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ unsigned int i; const char *zDir = 0; - if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); - if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); - if( !azDirs[2] ) azDirs[2] = getenv("TMP"); - if( !azDirs[3] ) azDirs[3] = getenv("TEMP"); - if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE"); + if( !azDirs[0] ) azDirs[0] = osGetenv("SQLITE_TMPDIR"); + if( !azDirs[1] ) azDirs[1] = osGetenv("TMPDIR"); + if( !azDirs[2] ) azDirs[2] = osGetenv("TMP"); + if( !azDirs[3] ) azDirs[3] = osGetenv("TEMP"); + if( !azDirs[4] ) azDirs[4] = osGetenv("USERPROFILE"); for(i=0; inOut ){ + /* SQLite assumes that xFullPathname() nul-terminates the output buffer + ** even if it returns an error. */ + zOut[iOff] = '\0'; + return SQLITE_CANTOPEN_BKPT; + } + sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath); + return SQLITE_OK; +} +#endif /* __CYGWIN__ */ /* ** Turn a relative pathname into a full pathname. Write the full @@ -52476,8 +53180,8 @@ static int winFullPathnameNoMutex( int nFull, /* Size of output buffer in bytes */ char *zFull /* Output buffer */ ){ -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) - DWORD nByte; +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + int nByte; void *zConverted; char *zOut; #endif @@ -52490,64 +53194,82 @@ static int winFullPathnameNoMutex( zRelative++; } -#if defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); - UNUSED_PARAMETER(nFull); - assert( nFull>=pVfs->mxPathname ); - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a slash. 
- */ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | - CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname1", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); - } - }else{ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), - zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname2", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); + +#ifdef __CYGWIN__ + if( osGetcwd ){ + zFull[nFull-1] = '\0'; + if( !winIsDriveLetterAndColon(zRelative) || !winIsDirSep(zRelative[2]) ){ + int rc = SQLITE_OK; + int nLink = 1; /* Number of symbolic links followed so far */ + const char *zIn = zRelative; /* Input path for each iteration of loop */ + char *zDel = 0; + struct stat buf; + + UNUSED_PARAMETER(pVfs); + + do { + /* Call lstat() on path zIn. Set bLink to true if the path is a symbolic + ** link, or false otherwise. */ + int bLink = 0; + if( osLstat && osReadlink ) { + if( osLstat(zIn, &buf)!=0 ){ + int myErrno = osErrno; + if( myErrno!=ENOENT ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)myErrno, "lstat", zIn); + } + }else{ + bLink = ((buf.st_mode & 0170000) == 0120000); + } + + if( bLink ){ + if( zDel==0 ){ + zDel = sqlite3MallocZero(nFull); + if( zDel==0 ) rc = SQLITE_NOMEM; + }else if( ++nLink>SQLITE_MAX_SYMLINKS ){ + rc = SQLITE_CANTOPEN_BKPT; + } + + if( rc==SQLITE_OK ){ + nByte = osReadlink(zIn, zDel, nFull-1); + if( nByte ==(DWORD)-1 ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)osErrno, "readlink", zIn); + }else{ + if( zDel[0]!='/' ){ + int n; + for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--); + if( nByte+n+1>nFull ){ + rc = SQLITE_CANTOPEN_BKPT; + }else{ + memmove(&zDel[n], zDel, nByte+1); + memcpy(zDel, zIn, n); + nByte += n; + } + } + zDel[nByte] = '\0'; + } + } + + zIn = zDel; + } + } + + assert( rc!=SQLITE_OK || zIn!=zFull || zIn[0]=='/' ); + if( rc==SQLITE_OK && zIn!=zFull ){ + rc = mkFullPathname(zIn, zFull, nFull); + } + if( bLink==0 ) break; + zIn = zFull; + }while( rc==SQLITE_OK ); + + sqlite3_free(zDel); + winSimplifyName(zFull); + return rc; } } - return SQLITE_OK; -#endif +#endif /* __CYGWIN__ */ -#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) +#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && defined(_WIN32) SimulateIOError( return SQLITE_ERROR ); /* WinCE has no concept of a relative pathname, or so I am told. */ /* WinRT has no way to convert a relative path to an absolute one. 
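/* Aside: the new Cygwin branch above follows symbolic links with lstat()
** and readlink(), giving up after a fixed number of hops
** (SQLITE_MAX_SYMLINKS in the real code) so that link cycles terminate.
** A minimal POSIX sketch of that loop, with hypothetical demo* names and
** without the relative-target re-anchoring the real code performs: */
#include <unistd.h>
#include <sys/stat.h>
#include <string.h>

#define DEMO_MAX_HOPS 100

static int demoResolveLink(const char *zIn, char *zOut, size_t nOut){
  char zBuf[4096];
  int nHop = 0;
  strncpy(zOut, zIn, nOut-1);
  zOut[nOut-1] = '\0';
  for(;;){
    struct stat st;
    ssize_t n;
    if( lstat(zOut, &st)!=0 || !S_ISLNK(st.st_mode) ) return 0; /* resolved */
    if( ++nHop>DEMO_MAX_HOPS ) return -1;        /* probable symlink cycle */
    n = readlink(zOut, zBuf, sizeof(zBuf)-1);
    if( n<0 || (size_t)n>=nOut ) return -1;
    zBuf[n] = '\0';
    memcpy(zOut, zBuf, (size_t)n+1);
  }
}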
*/ @@ -52566,7 +53288,8 @@ static int winFullPathnameNoMutex( return SQLITE_OK; #endif -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT +#if defined(_WIN32) /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. This function could fail if, for example, the @@ -52584,6 +53307,7 @@ static int winFullPathnameNoMutex( sqlite3_data_directory, winGetDirSep(), zRelative); return SQLITE_OK; } +#endif zConverted = winConvertFromUtf8Filename(zRelative); if( zConverted==0 ){ return SQLITE_IOERR_NOMEM_BKPT; @@ -52622,13 +53346,12 @@ static int winFullPathnameNoMutex( return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname3", zRelative); } - nByte += 3; - zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); + zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) + 3*sizeof(zTemp[0]) ); if( zTemp==0 ){ sqlite3_free(zConverted); return SQLITE_IOERR_NOMEM_BKPT; } - nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + nByte = osGetFullPathNameA((char*)zConverted, nByte+3, zTemp, 0); if( nByte==0 ){ sqlite3_free(zConverted); sqlite3_free(zTemp); @@ -52641,7 +53364,26 @@ static int winFullPathnameNoMutex( } #endif if( zOut ){ +#ifdef __CYGWIN__ + if( memcmp(zOut, "\\\\?\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); + }else if( memcmp(zOut+4, "UNC\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+4); + }else{ + char *p = zOut+6; + *p = '\\'; + if( osGetcwd ){ + /* On Cygwin, UNC paths use forward slashes */ + while( *p ){ + if( *p=='\\' ) *p = '/'; + ++p; + } + } + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+6); + } +#else sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); +#endif /* __CYGWIN__ */ sqlite3_free(zOut); return SQLITE_OK; }else{ @@ -52671,25 +53413,8 @@ static int winFullPathname( */ static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ HANDLE h; -#if defined(__CYGWIN__) - int nFull = pVfs->mxPathname+1; - char *zFull = sqlite3MallocZero( nFull ); - void *zConverted = 0; - if( zFull==0 ){ - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){ - sqlite3_free(zFull); - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - zConverted = winConvertFromUtf8Filename(zFull); - sqlite3_free(zFull); -#else void *zConverted = winConvertFromUtf8Filename(zFilename); UNUSED_PARAMETER(pVfs); -#endif if( zConverted==0 ){ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; @@ -53038,7 +53763,7 @@ SQLITE_API int sqlite3_os_init(void){ /* Double-check that the aSyscall[] array has been constructed ** correctly. 
See ticket [bb3a86e890c8e96ab] */
-  assert( ArraySize(aSyscall)==80 );
+  assert( ArraySize(aSyscall)==89 );

   /* get memory map allocation granularity */
   memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
@@ -53657,13 +54382,13 @@ static int memdbOpen(
   }
   if( p==0 ){
     MemStore **apNew;
-    p = sqlite3Malloc( sizeof(*p) + szName + 3 );
+    p = sqlite3Malloc( sizeof(*p) + (i64)szName + 3 );
     if( p==0 ){
       sqlite3_mutex_leave(pVfsMutex);
       return SQLITE_NOMEM;
     }
     apNew = sqlite3Realloc(memdb_g.apMemStore,
-                           sizeof(apNew[0])*(memdb_g.nMemStore+1) );
+                           sizeof(apNew[0])*(1+(i64)memdb_g.nMemStore) );
     if( apNew==0 ){
       sqlite3_free(p);
       sqlite3_mutex_leave(pVfsMutex);
@@ -54096,7 +54821,7 @@ SQLITE_PRIVATE int sqlite3MemdbInit(void){
 ** no fewer collisions than the no-op *1. */
 #define BITVEC_HASH(X)    (((X)*1)%BITVEC_NINT)

-#define BITVEC_NPTR      (BITVEC_USIZE/sizeof(Bitvec *))
+#define BITVEC_NPTR      ((u32)(BITVEC_USIZE/sizeof(Bitvec *)))

 /*
@@ -54245,7 +54970,9 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
     }else{
       memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
       memset(p->u.apSub, 0, sizeof(p->u.apSub));
-      p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR;
+      p->iDivisor = p->iSize/BITVEC_NPTR;
+      if( (p->iSize%BITVEC_NPTR)!=0 ) p->iDivisor++;
+      if( p->iDivisor<BITVEC_NBIT ) p->iDivisor = BITVEC_NBIT;
       rc = sqlite3BitvecSet(p, i);
       for(j=0; j<BITVEC_NINT; j++){
         if( aiValues[j] ) rc |= sqlite3BitvecSet(p, aiValues[j]);
       }
   if( p->iSize<=BITVEC_NBIT ){
-    p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1)));
+    p->u.aBitmap[i/BITVEC_SZELEM] &= ~(BITVEC_TELEM)(1<<(i&(BITVEC_SZELEM-1)));
   }else{
     unsigned int j;
     u32 *aiValues = pBuf;
@@ -54330,7 +55057,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){
 ** individual bits within V.
 */
 #define SETBIT(V,I)      V[I>>3] |= (1<<(I&7))
-#define CLEARBIT(V,I)    V[I>>3] &= ~(1<<(I&7))
+#define CLEARBIT(V,I)    V[I>>3] &= ~(BITVEC_TELEM)(1<<(I&7))
 #define TESTBIT(V,I)     (V[I>>3]&(1<<(I&7)))!=0

 /*
@@ -54373,7 +55100,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){
   /* Allocate the Bitvec to be tested and a linear array of
   ** bits to act as the reference */
   pBitvec = sqlite3BitvecCreate( sz );
-  pV = sqlite3MallocZero( (sz+7)/8 + 1 );
+  pV = sqlite3MallocZero( (7+(i64)sz)/8 + 1 );
   pTmpSpace = sqlite3_malloc64(BITVEC_SZ);
   if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end;
@@ -55614,10 +56341,6 @@ static SQLITE_WSD struct PCacheGlobal {
   sqlite3_mutex *mutex;          /* Mutex for accessing the following: */
   PgFreeslot *pFree;             /* Free page blocks */
   int nFreeSlot;                 /* Number of unused pcache slots */
-  /* The following value requires a mutex to change.  We skip the mutex on
-  ** reading because (1) most platforms read a 32-bit integer atomically and
-  ** (2) even if an incorrect value is read, no great harm is done since this
-  ** is really just an optimization. */
   int bUnderPressure;            /* True if low on PAGECACHE memory */
 } pcache1_g;
@@ -55665,7 +56388,7 @@ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
     pcache1.nReserve = n>90 ?
10 : (n/10 + 1);
     pcache1.pStart = pBuf;
     pcache1.pFree = 0;
-    pcache1.bUnderPressure = 0;
+    AtomicStore(&pcache1.bUnderPressure,0);
     while( n-- ){
       p = (PgFreeslot*)pBuf;
       p->pNext = pcache1.pFree;
@@ -55733,7 +56456,7 @@ static void *pcache1Alloc(int nByte){
     if( p ){
       pcache1.pFree = pcache1.pFree->pNext;
       pcache1.nFreeSlot--;
-      pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+      AtomicStore(&pcache1.bUnderPressure,pcache1.nFreeSlot<pcache1.nReserve);
       assert( pcache1.nFreeSlot>=0 );
       sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
       sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1);
@@ -55772,7 +56495,7 @@ static void pcache1Free(void *p){
       pSlot->pNext = pcache1.pFree;
       pcache1.pFree = pSlot;
       pcache1.nFreeSlot++;
-      pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+      AtomicStore(&pcache1.bUnderPressure,pcache1.nFreeSlot<pcache1.nReserve);
   if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){
-    return pcache1.bUnderPressure;
+    return AtomicLoad(&pcache1.bUnderPressure);
   }else{
     return sqlite3HeapNearlyFull();
   }
 }
@@ -55920,12 +56643,12 @@ static int pcache1UnderMemoryPressure(PCache1 *pCache){
 */
 static void pcache1ResizeHash(PCache1 *p){
   PgHdr1 **apNew;
-  unsigned int nNew;
-  unsigned int i;
+  u64 nNew;
+  u32 i;

   assert( sqlite3_mutex_held(p->pGroup->mutex) );

-  nNew = p->nHash*2;
+  nNew = 2*(u64)p->nHash;
   if( nNew<256 ){
     nNew = 256;
   }
@@ -56148,7 +56871,7 @@ static void pcache1Destroy(sqlite3_pcache *p);
 static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
   PCache1 *pCache;      /* The newly created page cache */
   PGroup *pGroup;       /* The group the new page cache will belong to */
-  int sz;               /* Bytes of memory required to allocate the new cache */
+  i64 sz;               /* Bytes of memory required to allocate the new cache */

   assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 );
   assert( szExtra < 300 );
@@ -58036,6 +58759,9 @@ struct Pager {
   Wal *pWal;                  /* Write-ahead log used by "journal_mode=wal" */
   char *zWal;                 /* File name for write-ahead log */
 #endif
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  sqlite3 *dbWal;
+#endif
 };

 /*
@@ -58627,7 +59353,7 @@ static void checkPage(PgHdr *pPg){
 ** If an error occurs while reading from the journal file, an SQLite
 ** error code is returned.
 */
-static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u32 nSuper){
+static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u64 nSuper){
   int rc;                    /* Return code */
   u32 len;                   /* Length in bytes of super-journal name */
   i64 szJ;                   /* Total size in bytes of journal file pJrnl */
@@ -59182,6 +59908,15 @@ static void pager_unlock(Pager *pPager){

   if( pagerUseWal(pPager) ){
     assert( !isOpen(pPager->jfd) );
+    if( pPager->eState==PAGER_ERROR ){
+      /* If an IO error occurs in wal.c while attempting to wrap the wal file,
+      ** then the Wal object may be holding a write-lock but no read-lock.
+      ** This call ensures that the write-lock is dropped as well. We cannot
+      ** have sqlite3WalEndReadTransaction() drop the write-lock, as it once
+      ** did, because this would break "BEGIN EXCLUSIVE" handling for
+      ** SQLITE_ENABLE_SETLK_TIMEOUT builds. */
+      sqlite3WalEndWriteTransaction(pPager->pWal);
+    }
     sqlite3WalEndReadTransaction(pPager->pWal);
     pPager->eState = PAGER_OPEN;
   }else if( !pPager->exclusiveMode ){
@@ -59863,12 +60598,12 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){
   char *zJournal;              /* Pointer to one journal within MJ file */
   char *zSuperPtr;             /* Space to hold super-journal filename */
   char *zFree = 0;             /* Free this buffer */
-  int nSuperPtr;               /* Amount of space allocated to zSuperPtr[] */
+  i64 nSuperPtr;               /* Amount of space allocated to zSuperPtr[] */

   /* Allocate space for both the pJournal and pSuper file descriptors.
   ** If successful, open the super-journal file for reading.
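/* Aside: the AtomicStore()/AtomicLoad() pairing adopted above for
** bUnderPressure is the classic "mutex-protected writer, lock-free reader"
** discipline.  A sketch (not from the diff) using C11 atomics, with
** hypothetical demo* names: */
#include <stdatomic.h>

static atomic_int demoUnderPressure;  /* written only under the cache mutex */

static void demoNoteFreeSlots(int nFreeSlot, int nReserve){
  /* Writer: still serialized by the mutex; the atomic store merely makes
  ** the unlocked read below well-defined instead of a data race. */
  atomic_store_explicit(&demoUnderPressure, nFreeSlot<nReserve,
                        memory_order_relaxed);
}

static int demoIsUnderPressure(void){
  /* Reader: no mutex, mirroring pcache1UnderMemoryPressure() above. */
  return atomic_load_explicit(&demoUnderPressure, memory_order_relaxed);
}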
*/ - pSuper = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); + pSuper = (sqlite3_file *)sqlite3MallocZero(2 * (i64)pVfs->szOsFile); if( !pSuper ){ rc = SQLITE_NOMEM_BKPT; pJournal = 0; @@ -59886,11 +60621,14 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ */ rc = sqlite3OsFileSize(pSuper, &nSuperJournal); if( rc!=SQLITE_OK ) goto delsuper_out; - nSuperPtr = pVfs->mxPathname+1; + nSuperPtr = 1 + (i64)pVfs->mxPathname; + assert( nSuperJournal>=0 && nSuperPtr>0 ); zFree = sqlite3Malloc(4 + nSuperJournal + nSuperPtr + 2); if( !zFree ){ rc = SQLITE_NOMEM_BKPT; goto delsuper_out; + }else{ + assert( nSuperJournal<=0x7fffffff ); } zFree[0] = zFree[1] = zFree[2] = zFree[3] = 0; zSuperJournal = &zFree[4]; @@ -60151,7 +60889,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** for pageSize. */ zSuper = pPager->pTmpSpace; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); if( rc==SQLITE_OK && zSuper[0] ){ rc = sqlite3OsAccess(pVfs, zSuper, SQLITE_ACCESS_EXISTS, &res); } @@ -60290,7 +61028,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** which case it requires 4 0x00 bytes in memory immediately before ** the filename. */ zSuper = &pPager->pTmpSpace[4]; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); testcase( rc!=SQLITE_OK ); } if( rc==SQLITE_OK @@ -62061,6 +62799,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( const char *zUri = 0; /* URI args to copy */ int nUriByte = 1; /* Number of bytes of URI args at *zUri */ + /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). */ journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); @@ -62086,8 +62825,8 @@ SQLITE_PRIVATE int sqlite3PagerOpen( */ if( zFilename && zFilename[0] ){ const char *z; - nPathname = pVfs->mxPathname+1; - zPathname = sqlite3DbMallocRaw(0, nPathname*2); + nPathname = pVfs->mxPathname + 1; + zPathname = sqlite3DbMallocRaw(0, 2*(i64)nPathname); if( zPathname==0 ){ return SQLITE_NOMEM_BKPT; } @@ -62174,14 +62913,14 @@ SQLITE_PRIVATE int sqlite3PagerOpen( ROUND8(sizeof(*pPager)) + /* Pager structure */ ROUND8(pcacheSize) + /* PCache object */ ROUND8(pVfs->szOsFile) + /* The main db file */ - journalFileSize * 2 + /* The two journal files */ + (u64)journalFileSize * 2 + /* The two journal files */ SQLITE_PTRSIZE + /* Space to hold a pointer */ 4 + /* Database prefix */ - nPathname + 1 + /* database filename */ - nUriByte + /* query parameters */ - nPathname + 8 + 1 + /* Journal filename */ + (u64)nPathname + 1 + /* database filename */ + (u64)nUriByte + /* query parameters */ + (u64)nPathname + 8 + 1 + /* Journal filename */ #ifndef SQLITE_OMIT_WAL - nPathname + 4 + 1 + /* WAL filename */ + (u64)nPathname + 4 + 1 + /* WAL filename */ #endif 3 /* Terminator */ ); @@ -64904,6 +65643,11 @@ static int pagerOpenWal(Pager *pPager){ pPager->fd, pPager->zWal, pPager->exclusiveMode, pPager->journalSizeLimit, &pPager->pWal ); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( rc==SQLITE_OK ){ + sqlite3WalDb(pPager->pWal, pPager->dbWal); + } +#endif } pagerFixMaplimit(pPager); @@ -65023,6 +65767,7 @@ SQLITE_PRIVATE int sqlite3PagerWalWriteLock(Pager *pPager, int bLock){ ** blocking locks are required. 
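/* Aside: the recurring "(i64)" casts above (2*(i64)pVfs->szOsFile,
** 1+(i64)pVfs->mxPathname, and so on) all guard the same hazard: with int
** operands the arithmetic itself is done in 32 bits and can wrap before the
** result is widened.  Widening one operand first keeps every intermediate
** value in 64 bits.  demoAllocSize() is a hypothetical illustration: */
#include <stdint.h>

static int64_t demoAllocSize(int szOsFile, int mxPathname){
  /* Risky: szOsFile*2 would be evaluated as int and may overflow.
  ** Safe:  cast first, then multiply and add. */
  return 2*(int64_t)szOsFile       /* two file descriptors */
       + 1 + (int64_t)mxPathname;  /* path buffer incl. nul terminator */
}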
*/
 SQLITE_PRIVATE void sqlite3PagerWalDb(Pager *pPager, sqlite3 *db){
+  pPager->dbWal = db;
   if( pagerUseWal(pPager) ){
     sqlite3WalDb(pPager->pWal, db);
   }
@@ -65636,6 +66381,11 @@ struct WalCkptInfo {
 /*
 ** An open write-ahead log file is represented by an instance of the
 ** following object.
+**
+** writeLock:
+**   This is usually set to 1 whenever the WRITER lock is held. However,
+**   if it is set to 2, then the WRITER lock is held but must be released
+**   by walHandleException() if a SEH exception is thrown.
 */
 struct Wal {
   sqlite3_vfs *pVfs;         /* The VFS used to create pDbFd */
@@ -65726,9 +66476,13 @@ struct WalIterator {
     u32 *aPgno;                   /* Array of page numbers. */
     int nEntry;                   /* Nr. of entries in aPgno[] and aIndex[] */
     int iZero;                    /* Frame number associated with aPgno[0] */
-  } aSegment[1];                  /* One for every 32KB page in the wal-index */
+  } aSegment[FLEXARRAY];          /* One for every 32KB page in the wal-index */
 };

+/* Size (in bytes) of a WalIterator object suitable for N or fewer segments */
+#define SZ_WALITERATOR(N) \
+  (offsetof(WalIterator,aSegment)+(N)*sizeof(struct WalSegment))
+
 /*
 ** Define the parameters of the hash tables in the wal-index file. There
 ** is a hash-table following every HASHTABLE_NPAGE page numbers in the
@@ -65887,7 +66641,7 @@ static SQLITE_NOINLINE int walIndexPageRealloc(
   /* Enlarge the pWal->apWiData[] array if required */
   if( pWal->nWiData<=iPage ){
-    sqlite3_int64 nByte = sizeof(u32*)*(iPage+1);
+    sqlite3_int64 nByte = sizeof(u32*)*(1+(i64)iPage);
     volatile u32 **apNew;
     apNew = (volatile u32 **)sqlite3Realloc((void *)pWal->apWiData, nByte);
     if( !apNew ){
@@ -65996,10 +66750,8 @@ static void walChecksumBytes(
     s1 = s2 = 0;
   }

-  assert( nByte>=8 );
-  assert( (nByte&0x00000007)==0 );
-  assert( nByte<=65536 );
-  assert( nByte%4==0 );
+  /* nByte is a multiple of 8 between 8 and 65536 */
+  assert( nByte>=8 && (nByte&7)==0 && nByte<=65536 );

   if( !nativeCksum ){
     do {
@@ -67089,8 +67841,7 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){

   /* Allocate space for the WalIterator object. */
   nSegment = walFramePage(iLast) + 1;
-  nByte = sizeof(WalIterator)
-        + (nSegment-1)*sizeof(struct WalSegment)
+  nByte = SZ_WALITERATOR(nSegment)
         + iLast*sizeof(ht_slot);
   p = (WalIterator *)sqlite3_malloc64(nByte
       + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast)
@@ -67161,7 +67912,7 @@ static int walEnableBlockingMs(Wal *pWal, int nMs){
 static int walEnableBlocking(Wal *pWal){
   int res = 0;
   if( pWal->db ){
-    int tmout = pWal->db->busyTimeout;
+    int tmout = pWal->db->setlkTimeout;
     if( tmout ){
       res = walEnableBlockingMs(pWal, tmout);
     }
@@ -67547,7 +68298,9 @@ static int walHandleException(Wal *pWal){
     static const int S = 1;
     static const int E = (1<<SQLITE_SHM_NLOCK);
     int ii;
-    u32 mUnlock = pWal->lockMask & ~(
+    u32 mUnlock;
+    if( pWal->writeLock==2 ) pWal->writeLock = 0;
+    mUnlock = pWal->lockMask & ~(
         (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
       | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
       | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
@@ -67819,7 +68572,12 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){
   if( bWriteLock
    || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1))
   ){
-    pWal->writeLock = 1;
+    /* If the write-lock was just obtained, set writeLock to 2 instead of
+    ** the usual 1. This causes walIndexPage() to behave as if the
+    ** write-lock were held (so that it allocates new pages as required),
+    ** and walHandleException() to unlock the write-lock if a SEH exception
+    ** is thrown.
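/* Aside: SZ_WALITERATOR above sizes a flexible-array structure as
** offsetof(struct, tail) + N*sizeof(element).  Compared with the old
** "sizeof(struct) + (N-1)*sizeof(element)" idiom it neither over-allocates
** (trailing padding is reused) nor relies on the N==0 corner case.  A
** self-contained sketch (not from the diff) with hypothetical demo* names: */
#include <stddef.h>
#include <stdlib.h>

typedef struct DemoIter DemoIter;
struct DemoIter {
  int nSeg;        /* Number of valid entries in aSeg[] */
  int aSeg[];      /* C99 flexible array member, like aSegment[FLEXARRAY] */
};
#define SZ_DEMOITER(N) (offsetof(DemoIter,aSeg) + (size_t)(N)*sizeof(int))

static DemoIter *demoIterNew(int nSeg){
  DemoIter *p = (DemoIter*)malloc( SZ_DEMOITER(nSeg) );
  if( p ) p->nSeg = nSeg;
  return p;  /* caller frees */
}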
*/
+    if( !bWriteLock ) pWal->writeLock = 2;
     if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){
       badHdr = walIndexTryHdr(pWal, pChanged);
       if( badHdr ){
@@ -68183,7 +68941,6 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){
     rc = walIndexReadHdr(pWal, pChanged);
   }
 #ifdef SQLITE_ENABLE_SETLK_TIMEOUT
-  walDisableBlocking(pWal);
   if( rc==SQLITE_BUSY_TIMEOUT ){
     rc = SQLITE_BUSY;
     *pCnt |= WAL_RETRY_BLOCKED_MASK;
@@ -68198,6 +68955,7 @@
     ** WAL_RETRY this routine will be called again and will probably be
     ** right on the second iteration.
     */
+    (void)walEnableBlocking(pWal);
     if( pWal->apWiData[0]==0 ){
       /* This branch is taken when the xShmMap() method returns SQLITE_BUSY.
       ** We assume this is a transient condition, so return WAL_RETRY. The
@@ -68214,6 +68972,7 @@
       rc = SQLITE_BUSY_RECOVERY;
     }
   }
+  walDisableBlocking(pWal);
   if( rc!=SQLITE_OK ){
     return rc;
   }
@@ -68604,8 +69363,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
 ** read-lock.
 */
 SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
-  sqlite3WalEndWriteTransaction(pWal);
+#ifndef SQLITE_ENABLE_SETLK_TIMEOUT
+  assert( pWal->writeLock==0 || pWal->readLock<0 );
+#endif
   if( pWal->readLock>=0 ){
+    sqlite3WalEndWriteTransaction(pWal);
     walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock));
     pWal->readLock = -1;
   }
@@ -68798,7 +69560,7 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){
   ** read-transaction was even opened, making this call a no-op.
   ** Return early. */
   if( pWal->writeLock ){
-    assert( !memcmp(&pWal->hdr,(void *)walIndexHdr(pWal),sizeof(WalIndexHdr)) );
+    assert( !memcmp(&pWal->hdr,(void*)pWal->apWiData[0],sizeof(WalIndexHdr)) );
     return SQLITE_OK;
   }
 #endif
@@ -68898,6 +69660,7 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p
       if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
     }
     SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+    pWal->iReCksum = 0;
   }
   return rc;
 }
@@ -68945,6 +69708,9 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
       walCleanupHash(pWal);
     }
     SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; )
+    if( pWal->iReCksum>pWal->hdr.mxFrame ){
+      pWal->iReCksum = 0;
+    }
   }

   return rc;
@@ -70247,6 +71013,12 @@ struct CellInfo {
 */
 #define BTCURSOR_MAX_DEPTH 20

+/*
+** Maximum amount of storage local to a database page, regardless of
+** page size.
+*/
+#define BT_MAX_LOCAL  65501     /* 65536 - 35 */
+
 /*
 ** A cursor is a pointer to a particular entry within a particular
 ** b-tree within a database file.
@@ -70655,7 +71427,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
 */
 static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){
   int i;
-  int skipOk = 1;
+  u8 skipOk = 1;
   Btree *p;
   assert( sqlite3_mutex_held(db->mutex) );
   for(i=0; i<db->nDb; i++){
@@ -71511,7 +72283,7 @@ static int saveCursorKey(BtCursor *pCur){
     ** below.
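/* Aside: walEnableBlocking()/walDisableBlocking() above bracket a single
** lock acquisition, so lock timeouts apply only where the caller can handle
** SQLITE_BUSY_TIMEOUT.  The shape of that bracket, reduced to a sketch (not
** from the diff) with hypothetical demo* names: */
typedef struct DemoLock DemoLock;
struct DemoLock { int timeoutMs; };

static int demoTryLock(DemoLock *p){
  /* Stand-in for an OS lock attempt that honors p->timeoutMs. */
  return p->timeoutMs>0 ? 0 : 1;
}

static int demoLockWithTimeout(DemoLock *p, int ms){
  int rc;
  int saved = p->timeoutMs;
  p->timeoutMs = ms;       /* like walEnableBlockingMs() */
  rc = demoTryLock(p);
  p->timeoutMs = saved;    /* like walDisableBlocking() */
  return rc;
}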
*/ void *pKey; pCur->nKey = sqlite3BtreePayloadSize(pCur); - pKey = sqlite3Malloc( pCur->nKey + 9 + 8 ); + pKey = sqlite3Malloc( ((i64)pCur->nKey) + 9 + 8 ); if( pKey ){ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ @@ -71801,7 +72573,7 @@ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ */ SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){ assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 ); - pCur->hints = x; + pCur->hints = (u8)x; } @@ -71995,14 +72767,15 @@ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( static int btreePayloadToLocal(MemPage *pPage, i64 nPayload){ int maxLocal; /* Maximum amount of payload held locally */ maxLocal = pPage->maxLocal; + assert( nPayload>=0 ); if( nPayload<=maxLocal ){ - return nPayload; + return (int)nPayload; }else{ int minLocal; /* Minimum amount of payload held locally */ int surplus; /* Overflow payload available for local storage */ minLocal = pPage->minLocal; - surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize-4); - return ( surplus <= maxLocal ) ? surplus : minLocal; + surplus = (int)(minLocal +(nPayload - minLocal)%(pPage->pBt->usableSize-4)); + return (surplus <= maxLocal) ? surplus : minLocal; } } @@ -72112,11 +72885,13 @@ static void btreeParseCellPtr( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -72149,11 +72924,13 @@ static void btreeParseCellPtrIndex( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -72692,14 +73469,14 @@ static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ -static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ - u16 iPtr; /* Address of ptr to next freeblock */ - u16 iFreeBlk; /* Address of the next freeblock */ +static int freeSpace(MemPage *pPage, int iStart, int iSize){ + int iPtr; /* Address of ptr to next freeblock */ + int iFreeBlk; /* Address of the next freeblock */ u8 hdr; /* Page header size. 
0 or 100 */
-  u8 nFrag = 0;              /* Reduction in fragmentation */
-  u16 iOrigSize = iSize;     /* Original value of iSize */
-  u16 x;                     /* Offset to cell content area */
-  u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */
+  int nFrag = 0;             /* Reduction in fragmentation */
+  int iOrigSize = iSize;     /* Original value of iSize */
+  int x;                     /* Offset to cell content area */
+  int iEnd = iStart + iSize; /* First byte past the iStart buffer */
   unsigned char *data = pPage->aData;   /* Page content */
   u8 *pTmp;                  /* Temporary ptr into data[] */

@@ -72726,7 +73503,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
       }
       iPtr = iFreeBlk;
     }
-    if( iFreeBlk>pPage->pBt->usableSize-4 ){  /* TH3: corrupt081.100 */
+    if( iFreeBlk>(int)pPage->pBt->usableSize-4 ){  /* TH3: corrupt081.100 */
       return SQLITE_CORRUPT_PAGE(pPage);
     }
     assert( iFreeBlk>iPtr || iFreeBlk==0 || CORRUPT_DB );
@@ -72741,7 +73518,7 @@
       nFrag = iFreeBlk - iEnd;
       if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_PAGE(pPage);
       iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]);
-      if( iEnd > pPage->pBt->usableSize ){
+      if( iEnd > (int)pPage->pBt->usableSize ){
        return SQLITE_CORRUPT_PAGE(pPage);
       }
       iSize = iEnd - iStart;
@@ -72762,7 +73539,7 @@
       }
     }
     if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage);
-    data[hdr+7] -= nFrag;
+    data[hdr+7] -= (u8)nFrag;
   }
   pTmp = &data[hdr+5];
   x = get2byte(pTmp);
@@ -72783,7 +73560,8 @@
     /* Insert the new freeblock into the freelist */
     put2byte(&data[iPtr], iStart);
     put2byte(&data[iStart], iFreeBlk);
-    put2byte(&data[iStart+2], iSize);
+    assert( iSize>=0 && iSize<=0xffff );
+    put2byte(&data[iStart+2], (u16)iSize);
   }
   pPage->nFree += iOrigSize;
   return SQLITE_OK;
@@ -73009,7 +73787,7 @@ static int btreeInitPage(MemPage *pPage){
   assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
   pPage->maskPage = (u16)(pBt->pageSize - 1);
   pPage->nOverflow = 0;
-  pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize;
+  pPage->cellOffset = (u16)(pPage->hdrOffset + 8 + pPage->childPtrSize);
   pPage->aCellIdx = data + pPage->childPtrSize + 8;
   pPage->aDataEnd = pPage->aData + pBt->pageSize;
   pPage->aDataOfst = pPage->aData + pPage->childPtrSize;
@@ -73043,8 +73821,8 @@ static void zeroPage(MemPage *pPage, int flags){
   unsigned char *data = pPage->aData;
   BtShared *pBt = pPage->pBt;
-  u8 hdr = pPage->hdrOffset;
-  u16 first;
+  int hdr = pPage->hdrOffset;
+  int first;

   assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno || CORRUPT_DB );
   assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
@@ -73061,7 +73839,7 @@
   put2byte(&data[hdr+5], pBt->usableSize);
   pPage->nFree = (u16)(pBt->usableSize - first);
   decodeFlags(pPage, flags);
-  pPage->cellOffset = first;
+  pPage->cellOffset = (u16)first;
   pPage->aDataEnd = &data[pBt->pageSize];
   pPage->aCellIdx = &data[first];
   pPage->aDataOfst = &data[pPage->childPtrSize];
@@ -73847,7 +74625,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve,
   BtShared *pBt = p->pBt;
   assert( nReserve>=0 && nReserve<=255 );
   sqlite3BtreeEnter(p);
-  pBt->nReserveWanted = nReserve;
+  pBt->nReserveWanted = (u8)nReserve;
   x = pBt->pageSize - pBt->usableSize;
   if( nReserve<x ) nReserve = x;
   if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
@@ -73953,7 +74731,7 @@ SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
   assert(
BTS_FAST_SECURE==(BTS_OVERWRITE|BTS_SECURE_DELETE) );
   if( newFlag>=0 ){
     p->pBt->btsFlags &= ~BTS_FAST_SECURE;
-    p->pBt->btsFlags |= BTS_SECURE_DELETE*newFlag;
+    p->pBt->btsFlags |= (u16)(BTS_SECURE_DELETE*newFlag);
   }
   b = (p->pBt->btsFlags & BTS_FAST_SECURE)/BTS_SECURE_DELETE;
   sqlite3BtreeLeave(p);
@@ -74473,6 +75251,13 @@ static SQLITE_NOINLINE int btreeBeginTrans(
         (void)sqlite3PagerWalWriteLock(pPager, 0);
         unlockBtreeIfUnused(pBt);
       }
+#if defined(SQLITE_ENABLE_SETLK_TIMEOUT)
+      if( rc==SQLITE_BUSY_TIMEOUT ){
+        /* If a blocking lock timed out, break out of the loop here so that
+        ** the busy-handler is not invoked. */
+        break;
+      }
+#endif
     }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE &&
             btreeInvokeBusyHandler(pBt) );
     sqlite3PagerWalDb(pPager, 0);
@@ -76882,7 +77667,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
             rc = SQLITE_CORRUPT_PAGE(pPage);
             goto moveto_index_finish;
           }
-          pCellKey = sqlite3Malloc( nCell+nOverrun );
+          pCellKey = sqlite3Malloc( (u64)nCell+(u64)nOverrun );
           if( pCellKey==0 ){
             rc = SQLITE_NOMEM_BKPT;
             goto moveto_index_finish;
@@ -78401,7 +79186,8 @@ static int rebuildPage(
   }

   /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
-  pPg->nCell = nCell;
+  assert( nCell < 10922 );
+  pPg->nCell = (u16)nCell;
   pPg->nOverflow = 0;

   put2byte(&aData[hdr+1], 0);
@@ -78648,9 +79434,13 @@ static int editPage(
   if( pageInsertArray(
         pPg, pBegin, &pData, pCellptr,
         iNew+nCell, nNew-nCell, pCArray
-  ) ) goto editpage_fail;
+  )
+  ){
+    goto editpage_fail;
+  }

-  pPg->nCell = nNew;
+  assert( nNew < 10922 );
+  pPg->nCell = (u16)nNew;
   pPg->nOverflow = 0;

   put2byte(&aData[hdr+3], pPg->nCell);
@@ -78959,7 +79749,7 @@ static int balance_nonroot(
   int pageFlags;               /* Value of pPage->aData[0] */
   int iSpace1 = 0;             /* First unused byte of aSpace1[] */
   int iOvflSpace = 0;          /* First unused byte of aOvflSpace[] */
-  int szScratch;               /* Size of scratch memory requested */
+  u64 szScratch;               /* Size of scratch memory requested */
   MemPage *apOld[NB];          /* pPage and up to two siblings */
   MemPage *apNew[NB+2];        /* pPage and up to NB siblings after balancing */
   u8 *pRight;                  /* Location in parent of right-sibling pointer */
@@ -80244,7 +81034,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
       if( pCur->info.nKey==pX->nKey ){
         BtreePayload x2;
         x2.pData = pX->pKey;
-        x2.nData = pX->nKey;
+        x2.nData = (int)pX->nKey;  assert( pX->nKey<=0x7fffffff );
         x2.nZero = 0;
         return btreeOverwriteCell(pCur, &x2);
       }
@@ -80425,7 +81215,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64

   getCellInfo(pSrc);
   if( pSrc->info.nPayload<0x80 ){
-    *(aOut++) = pSrc->info.nPayload;
+    *(aOut++) = (u8)pSrc->info.nPayload;
   }else{
     aOut += sqlite3PutVarint(aOut, pSrc->info.nPayload);
   }
@@ -80438,7 +81228,7 @@
   nRem = pSrc->info.nPayload;
   if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
     memcpy(aOut, aIn, nIn);
-    pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace);
+    pBt->nPreformatSize = nIn + (int)(aOut - pBt->pTmpSpace);
     return SQLITE_OK;
   }else{
     int rc = SQLITE_OK;
@@ -80450,7 +81240,7 @@
     u32 nOut;                    /* Size of output buffer aOut[] */

     nOut = btreePayloadToLocal(pDest->pPage, pSrc->info.nPayload);
-    pBt->nPreformatSize = nOut + (aOut - pBt->pTmpSpace);
+    pBt->nPreformatSize = (int)nOut + (int)(aOut - pBt->pTmpSpace);
     if( nOut<pSrc->info.nPayload ){
       pPgnoOut = &aOut[nOut];
       pBt->nPreformatSize += 4;
@@ -82071,6 +82861,7 @@ SQLITE_PRIVATE int
sqlite3BtreeIsInBackup(Btree *p){
 */
 SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){
   BtShared *pBt = p->pBt;
+  assert( nBytes==0 || nBytes==sizeof(Schema) );
   sqlite3BtreeEnter(p);
   if( !pBt->pSchema && nBytes ){
     pBt->pSchema = sqlite3DbMallocZero(0, nBytes);
@@ -83187,7 +83978,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
 ** corresponding string value, then it is important that the string be
 ** derived from the numeric value, not the other way around, to ensure
 ** that the index and table are consistent.  See ticket
-** https://www.sqlite.org/src/info/343634942dd54ab (2018-01-31) for
+** https://sqlite.org/src/info/343634942dd54ab (2018-01-31) for
 ** an example.
 **
 ** This routine looks at pMem to verify that if it has both a numeric
@@ -83373,7 +84164,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){
     return;
   }
   if( pMem->enc!=SQLITE_UTF8 ) return;
-  if( NEVER(pMem->z==0) ) return;
+  assert( pMem->z!=0 );
   if( pMem->flags & MEM_Dyn ){
     if( pMem->xDel==sqlite3_free
      && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1)
@@ -84486,7 +85277,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
     if( pRec==0 ){
       Index *pIdx = p->pIdx;      /* Index being probed */
-      int nByte;                  /* Bytes of space to allocate */
+      i64 nByte;                  /* Bytes of space to allocate */
       int i;                      /* Counter variable */
       int nCol = pIdx->nColumn;   /* Number of index columns including rowid */
@@ -84552,7 +85343,7 @@ static int valueFromFunction(
   sqlite3_context ctx;            /* Context object for function invocation */
   sqlite3_value **apVal = 0;      /* Function arguments */
-  int nVal = 0;                   /* Size of apVal[] array */
+  int nVal = 0;                   /* Number of function arguments */
   FuncDef *pFunc = 0;             /* Function definition */
   sqlite3_value *pVal = 0;        /* New value */
   int rc = SQLITE_OK;             /* Return code */
@@ -85550,12 +86341,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(
   int eCallCtx                    /* Calling context */
 ){
   Vdbe *v = pParse->pVdbe;
-  int nByte;
   int addr;
   sqlite3_context *pCtx;
   assert( v );
-  nByte = sizeof(*pCtx) + (nArg-1)*sizeof(sqlite3_value*);
-  pCtx = sqlite3DbMallocRawNN(pParse->db, nByte);
+  pCtx = sqlite3DbMallocRawNN(pParse->db, SZ_CONTEXT(nArg));
   if( pCtx==0 ){
     assert( pParse->db->mallocFailed );
     freeEphemeralFunction(pParse->db, (FuncDef*)pFunc);
@@ -85831,7 +86620,7 @@ static Op *opIterNext(VdbeOpIter *p){
     }

     if( pRet->p4type==P4_SUBPROGRAM ){
-      int nByte = (p->nSub+1)*sizeof(SubProgram*);
+      i64 nByte = (1+(u64)p->nSub)*sizeof(SubProgram*);
       int j;
       for(j=0; j<p->nSub; j++){
         if( p->apSub[j]==pRet->p4.pProgram ) break;
@@ -85961,8 +86750,8 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){
 ** (1) For each jump instruction with a negative P2 value (a label)
 **     resolve the P2 value to an actual address.
 **
-** (2) Compute the maximum number of arguments used by any SQL function
-**     and store that value in *pMaxFuncArgs.
+** (2) Compute the maximum number of arguments used by the xUpdate/xFilter
+**     methods of any virtual table and store that value in *pMaxVtabArgs.
 **
 ** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
 **     indicate what the prepared statement actually does.
 **
 ** script numbers the opcodes correctly.  Changes to this routine must be
 ** coordinated with changes to mkopcodeh.tcl.
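/* Aside: resolveP2Values() below recovers the argument count of each
** OP_VFilter from the OP_Integer that, by construction (and by the new
** assert in the diff), immediately precedes it.  The scan pattern in
** isolation, as a sketch (not from the diff) with hypothetical demo types
** and opcode numbers: */
typedef struct DemoOp { int opcode; int p1; } DemoOp;
enum { DEMO_OP_INTEGER = 1, DEMO_OP_VFILTER = 2 };

static int demoMaxVtabArgs(const DemoOp *aOp, int nOp){
  int i, nMax = 0;
  for(i=1; i<nOp; i++){
    if( aOp[i].opcode==DEMO_OP_VFILTER
     && aOp[i-1].opcode==DEMO_OP_INTEGER  /* holds argc, per the invariant */
     && aOp[i-1].p1>nMax
    ){
      nMax = aOp[i-1].p1;
    }
  }
  return nMax;
}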
*/ -static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ - int nMaxArgs = *pMaxFuncArgs; +static void resolveP2Values(Vdbe *p, int *pMaxVtabArgs){ + int nMaxVtabArgs = *pMaxVtabArgs; Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; @@ -86021,15 +86810,19 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ } #ifndef SQLITE_OMIT_VIRTUALTABLE case OP_VUpdate: { - if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; + if( pOp->p2>nMaxVtabArgs ) nMaxVtabArgs = pOp->p2; break; } case OP_VFilter: { int n; + /* The instruction immediately prior to VFilter will be an + ** OP_Integer that sets the "argc" value for the VFilter. See + ** the code where OP_VFilter is generated at tag-20250207a. */ assert( (pOp - p->aOp) >= 3 ); assert( pOp[-1].opcode==OP_Integer ); + assert( pOp[-1].p2==pOp->p3+1 ); n = pOp[-1].p1; - if( n>nMaxArgs ) nMaxArgs = n; + if( n>nMaxVtabArgs ) nMaxVtabArgs = n; /* Fall through into the default case */ /* no break */ deliberate_fall_through } @@ -86070,7 +86863,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ pParse->aLabel = 0; } pParse->nLabel = 0; - *pMaxFuncArgs = nMaxArgs; + *pMaxVtabArgs = nMaxVtabArgs; assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) ); } @@ -86299,7 +87092,7 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( const char *zName /* Name of table or index being scanned */ ){ if( IS_STMT_SCANSTATUS(p->db) ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + i64 nByte = (1+(i64)p->nScan) * sizeof(ScanStatus); ScanStatus *aNew; aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); if( aNew ){ @@ -86409,6 +87202,9 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ */ SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){ VdbeOp *pOp = sqlite3VdbeGetLastOp(p); +#ifdef SQLITE_DEBUG + while( pOp->opcode==OP_ReleaseReg ) pOp--; +#endif if( pOp->p3==iDest && pOp->opcode==OP_Column ){ pOp->p5 |= OPFLAG_TYPEOFARG; } @@ -87748,7 +88544,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( int nVar; /* Number of parameters */ int nMem; /* Number of VM memory registers */ int nCursor; /* Number of cursors required */ - int nArg; /* Number of arguments in subprograms */ + int nArg; /* Max number args to xFilter or xUpdate */ int n; /* Loop counter */ struct ReusableSpace x; /* Reusable bulk memory */ @@ -87820,6 +88616,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); } } +#ifdef SQLITE_DEBUG + p->napArg = nArg; +#endif if( db->mallocFailed ){ p->nVar = 0; @@ -89317,6 +90116,7 @@ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( ){ UnpackedRecord *p; /* Unpacked record to return */ int nByte; /* Number of bytes required for *p */ + assert( sizeof(UnpackedRecord) + sizeof(Mem)*65536 < 0x7fffffff ); nByte = ROUND8P(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1); p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); if( !p ) return 0; @@ -90623,10 +91423,11 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( preupdate.pCsr = pCsr; preupdate.op = op; preupdate.iNewReg = iReg; - preupdate.keyinfo.db = db; - preupdate.keyinfo.enc = ENC(db); - preupdate.keyinfo.nKeyField = pTab->nCol; - preupdate.keyinfo.aSortFlags = (u8*)&fakeSortOrder; + preupdate.pKeyinfo = (KeyInfo*)&preupdate.keyinfoSpace; + preupdate.pKeyinfo->db = db; + preupdate.pKeyinfo->enc = ENC(db); + preupdate.pKeyinfo->nKeyField = pTab->nCol; + preupdate.pKeyinfo->aSortFlags = (u8*)&fakeSortOrder; preupdate.iKey1 = iKey1; preupdate.iKey2 = iKey2; 
preupdate.pTab = pTab; @@ -90636,8 +91437,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2); db->pPreUpdate = 0; sqlite3DbFree(db, preupdate.aRecord); - vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pUnpacked); - vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pNewUnpacked); + vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pUnpacked); + vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pNewUnpacked); sqlite3VdbeMemRelease(&preupdate.oldipk); if( preupdate.aNew ){ int i; @@ -92468,7 +93269,7 @@ SQLITE_API int sqlite3_bind_text64( assert( xDel!=SQLITE_DYNAMIC ); if( enc!=SQLITE_UTF8 ){ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; - nData &= ~(u16)1; + nData &= ~(u64)1; } return bindText(pStmt, i, zData, nData, xDel, enc); } @@ -92876,7 +93677,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa if( !aRec ) goto preupdate_old_out; rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); if( rc==SQLITE_OK ){ - p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); + p->pUnpacked = vdbeUnpackRecord(p->pKeyinfo, nRec, aRec); if( !p->pUnpacked ) rc = SQLITE_NOMEM; } if( rc!=SQLITE_OK ){ @@ -92893,7 +93694,9 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa Column *pCol = &p->pTab->aCol[iIdx]; if( pCol->iDflt>0 ){ if( p->apDflt==0 ){ - int nByte = sizeof(sqlite3_value*)*p->pTab->nCol; + int nByte; + assert( sizeof(sqlite3_value*)*UMXV(p->pTab->nCol) < 0x7fffffff ); + nByte = sizeof(sqlite3_value*)*p->pTab->nCol; p->apDflt = (sqlite3_value**)sqlite3DbMallocZero(db, nByte); if( p->apDflt==0 ) goto preupdate_old_out; } @@ -92939,7 +93742,7 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ #else p = db->pPreUpdate; #endif - return (p ? p->keyinfo.nKeyField : 0); + return (p ? p->pKeyinfo->nKeyField : 0); } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -93022,7 +93825,7 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa Mem *pData = &p->v->aMem[p->iNewReg]; rc = ExpandBlob(pData); if( rc!=SQLITE_OK ) goto preupdate_new_out; - pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z); + pUnpack = vdbeUnpackRecord(p->pKeyinfo, pData->n, pData->z); if( !pUnpack ){ rc = SQLITE_NOMEM; goto preupdate_new_out; @@ -93043,7 +93846,8 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa */ assert( p->op==SQLITE_UPDATE ); if( !p->aNew ){ - p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField); + assert( sizeof(Mem)*UMXV(p->pCsr->nField) < 0x7fffffff ); + p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem)*p->pCsr->nField); if( !p->aNew ){ rc = SQLITE_NOMEM; goto preupdate_new_out; @@ -93813,11 +94617,11 @@ static VdbeCursor *allocateCursor( */ Mem *pMem = iCur>0 ? 
&p->aMem[p->nMem-iCur] : p->aMem;
-  int nByte;
+  i64 nByte;
   VdbeCursor *pCx = 0;
-  nByte =
-      ROUND8P(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField +
-      (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0);
+  nByte = SZ_VDBECURSOR(nField);
+  assert( ROUND8(nByte)==nByte );
+  if( eCurType==CURTYPE_BTREE ) nByte += sqlite3BtreeCursorSize();

   assert( iCur>=0 && iCur<p->nCursor );
   if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/
@@ -93841,7 +94645,7 @@
       pMem->szMalloc = 0;
       return 0;
     }
-    pMem->szMalloc = nByte;
+    pMem->szMalloc = (int)nByte;
   }

   p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->zMalloc;
@@ -93850,8 +94654,8 @@
   pCx->nField = nField;
   pCx->aOffset = &pCx->aType[nField];
   if( eCurType==CURTYPE_BTREE ){
-    pCx->uc.pCursor = (BtCursor*)
-        &pMem->z[ROUND8P(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
+    assert( ROUND8(SZ_VDBECURSOR(nField))==SZ_VDBECURSOR(nField) );
+    pCx->uc.pCursor = (BtCursor*)&pMem->z[SZ_VDBECURSOR(nField)];
     sqlite3BtreeCursorZero(pCx->uc.pCursor);
   }
   return pCx;
@@ -94855,7 +95659,7 @@ case OP_Halt: {
       sqlite3VdbeError(p, "%s", pOp->p4.z);
     }
     pcx = (int)(pOp - aOp);
-    sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg);
+    sqlite3_log(pOp->p1, "abort at %d: %s; [%s]", pcx, p->zErrMsg, p->zSql);
   }
   rc = sqlite3VdbeHalt(p);
   assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR );
@@ -96181,7 +96985,7 @@ case OP_BitNot: {             /* same as TK_BITNOT, in1, out2 */
   break;
 }

-/* Opcode: Once P1 P2 * * *
+/* Opcode: Once P1 P2 P3 * *
 **
 ** Fall through to the next instruction the first time this opcode is
 ** encountered on each invocation of the byte-code program.  Jump to P2
@@ -96197,6 +97001,12 @@ case OP_BitNot: {             /* same as TK_BITNOT, in1, out2 */
 ** whether or not the jump should be taken.  The bitmask is necessary
 ** because the self-altering code trick does not work for recursive
 ** triggers.
+**
+** The P3 operand is not used directly by this opcode.  However P3 is
+** used by the code generator as follows:  If this opcode is the start
+** of a subroutine and that subroutine uses a Bloom filter, then P3 will
+** be the register that holds that Bloom filter.  See tag-202407032019
+** in the source code for implementation details.
 */
 case OP_Once: {             /* jump */
   u32 iAddr;                /* Address of this instruction */
@@ -97242,6 +98052,7 @@ case OP_MakeRecord: {
     zHdr += sqlite3PutVarint(zHdr, serial_type);
     if( pRec->n ){
       assert( pRec->z!=0 );
+      assert( pRec->z!=(const char*)sqlite3CtypeMap );
       memcpy(zPayload, pRec->z, pRec->n);
       zPayload += pRec->n;
     }
@@ -99593,7 +100404,7 @@ case OP_RowData: {
   /* The OP_RowData opcodes always follow OP_NotExists or
   ** OP_SeekRowid or OP_Rewind/Op_Next with no intervening instructions
   ** that might invalidate the cursor.
-  ** If this where not the case, on of the following assert()s
+  ** If this were not the case, one of the following assert()s
   ** would fail.  Should this ever change (because of changes in the code
   ** generator) then the fix would be to insert a call to
   ** sqlite3VdbeCursorMoveto().
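/* Aside: the OP_Once P3 convention documented above threads a Bloom-filter
** register from the subroutine that builds the filter to the OP_Filter that
** probes it.  The filter contract itself, sketched (not from the diff) on a
** single 64-bit word; real SQLite filters are blob registers, and demo*
** names are hypothetical: */
#include <stdint.h>

static void demoBloomAdd(uint64_t *pFilter, uint64_t h){
  *pFilter |= ((uint64_t)1)<<(h&63);
}

static int demoBloomMaybeContains(uint64_t filter, uint64_t h){
  /* 0 => definitely not present; 1 => maybe present (false positives ok) */
  return (int)((filter>>(h&63))&1);
}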
@@ -100862,7 +101673,7 @@ case OP_RowSetTest: { /* jump, in1, in3 */ */ case OP_Program: { /* jump0 */ int nMem; /* Number of memory registers for sub-program */ - int nByte; /* Bytes of runtime space required for sub-program */ + i64 nByte; /* Bytes of runtime space required for sub-program */ Mem *pRt; /* Register to allocate runtime space */ Mem *pMem; /* Used to iterate through memory cells */ Mem *pEnd; /* Last memory cell in new array */ @@ -100913,7 +101724,7 @@ case OP_Program: { /* jump0 */ nByte = ROUND8(sizeof(VdbeFrame)) + nMem * sizeof(Mem) + pProgram->nCsr * sizeof(VdbeCursor*) - + (pProgram->nOp + 7)/8; + + (7 + (i64)pProgram->nOp)/8; pFrame = sqlite3DbMallocZero(db, nByte); if( !pFrame ){ goto no_mem; @@ -100921,7 +101732,7 @@ case OP_Program: { /* jump0 */ sqlite3VdbeMemRelease(pRt); pRt->flags = MEM_Blob|MEM_Dyn; pRt->z = (char*)pFrame; - pRt->n = nByte; + pRt->n = (int)nByte; pRt->xDel = sqlite3VdbeFrameMemDel; pFrame->v = p; @@ -101020,12 +101831,14 @@ case OP_Param: { /* out2 */ ** statement counter is incremented (immediate foreign key constraints). */ case OP_FkCounter: { - if( db->flags & SQLITE_DeferFKs ){ - db->nDeferredImmCons += pOp->p2; - }else if( pOp->p1 ){ + if( pOp->p1 ){ db->nDeferredCons += pOp->p2; }else{ - p->nFkConstraint += pOp->p2; + if( db->flags & SQLITE_DeferFKs ){ + db->nDeferredImmCons += pOp->p2; + }else{ + p->nFkConstraint += pOp->p2; + } } break; } @@ -101240,7 +102053,7 @@ case OP_AggStep: { ** ** Note: We could avoid this by using a regular memory cell from aMem[] for ** the accumulator, instead of allocating one here. */ - nAlloc = ROUND8P( sizeof(pCtx[0]) + (n-1)*sizeof(sqlite3_value*) ); + nAlloc = ROUND8P( SZ_CONTEXT(n) ); pCtx = sqlite3DbMallocRawNN(db, nAlloc + sizeof(Mem)); if( pCtx==0 ) goto no_mem; pCtx->pOut = (Mem*)((u8*)pCtx + nAlloc); @@ -101900,6 +102713,7 @@ case OP_VFilter: { /* jump, ncycle */ /* Invoke the xFilter method */ apArg = p->apArg; + assert( nArg<=p->napArg ); for(i = 0; ivtabOnConflict; apArg = p->apArg; pX = &aMem[pOp->p3]; + assert( nArg<=p->napArg ); for(i=0; irc = rc; sqlite3SystemError(db, rc); testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(rc, "statement aborts at %d: [%s] %s", - (int)(pOp - aOp), p->zSql, p->zErrMsg); + sqlite3_log(rc, "statement aborts at %d: %s; [%s]", + (int)(pOp - aOp), p->zErrMsg, p->zSql); if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){ @@ -102896,6 +103711,7 @@ SQLITE_API int sqlite3_blob_open( char *zErr = 0; Table *pTab; Incrblob *pBlob = 0; + int iDb; Parse sParse; #ifdef SQLITE_ENABLE_API_ARMOR @@ -102941,7 +103757,10 @@ SQLITE_API int sqlite3_blob_open( sqlite3ErrorMsg(&sParse, "cannot open view: %s", zTable); } #endif - if( !pTab ){ + if( pTab==0 + || ((iDb = sqlite3SchemaToIndex(db, pTab->pSchema))==1 && + sqlite3OpenTempDatabase(&sParse)) + ){ if( sParse.zErrMsg ){ sqlite3DbFree(db, zErr); zErr = sParse.zErrMsg; @@ -102952,15 +103771,11 @@ SQLITE_API int sqlite3_blob_open( goto blob_open_out; } pBlob->pTab = pTab; - pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName; + pBlob->zDb = db->aDb[iDb].zDbSName; /* Now search pTab for the exact column. 
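/* Aside: several hunks below replace open-coded scans over pTab->aCol with
** sqlite3ColumnIndex().  Each of those scans reduces to this shape: a
** case-insensitive name search that returns the column index, or -1 when
** there is no match (so callers test "<0" instead of "==nCol").  A sketch
** (not from the diff) with hypothetical demo* names: */
#include <strings.h>

static int demoColumnIndex(const char * const *azCol, int nCol,
                           const char *zName){
  int i;
  for(i=0; i<nCol; i++){
    if( strcasecmp(azCol[i], zName)==0 ) return i;
  }
  return -1;
}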
*/
-  for(iCol=0; iCol<pTab->nCol; iCol++) {
-    if( sqlite3StrICmp(pTab->aCol[iCol].zCnName, zColumn)==0 ){
-      break;
-    }
-  }
-  if( iCol==pTab->nCol ){
+  iCol = sqlite3ColumnIndex(pTab, zColumn);
+  if( iCol<0 ){
     sqlite3DbFree(db, zErr);
     zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn);
     rc = SQLITE_ERROR;
@@ -103040,7 +103855,6 @@ SQLITE_API int sqlite3_blob_open(
       {OP_Halt, 0, 0, 0},            /* 5 */
     };
     Vdbe *v = (Vdbe *)pBlob->pStmt;
-    int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
     VdbeOp *aOp;

     sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag,
@@ -103618,9 +104432,12 @@ struct VdbeSorter {
   u8 iPrev;                       /* Previous thread used to flush PMA */
   u8 nTask;                       /* Size of aTask[] array */
   u8 typeMask;
-  SortSubtask aTask[1];           /* One or more subtasks */
+  SortSubtask aTask[FLEXARRAY];   /* One or more subtasks */
 };

+/* Size (in bytes) of a VdbeSorter object that works with N or fewer subtasks */
+#define SZ_VDBESORTER(N) (offsetof(VdbeSorter,aTask)+(N)*sizeof(SortSubtask))
+
 #define SORTER_TYPE_INTEGER 0x01
 #define SORTER_TYPE_TEXT    0x02
@@ -104222,7 +105039,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit(
   VdbeSorter *pSorter;            /* The new sorter */
   KeyInfo *pKeyInfo;              /* Copy of pCsr->pKeyInfo with db==0 */
   int szKeyInfo;                  /* Size of pCsr->pKeyInfo in bytes */
-  int sz;                         /* Size of pSorter in bytes */
+  i64 sz;                         /* Size of pSorter in bytes */
   int rc = SQLITE_OK;
 #if SQLITE_MAX_WORKER_THREADS==0
 # define nWorker 0
@@ -104250,8 +105067,10 @@
   assert( pCsr->pKeyInfo );
   assert( !pCsr->isEphemeral );
   assert( pCsr->eCurType==CURTYPE_SORTER );
-  szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nKeyField-1)*sizeof(CollSeq*);
-  sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask);
+  assert( sizeof(KeyInfo) + UMXV(pCsr->pKeyInfo->nKeyField)*sizeof(CollSeq*)
+               < 0x7fffffff );
+  szKeyInfo = SZ_KEYINFO(pCsr->pKeyInfo->nKeyField);
+  sz = SZ_VDBESORTER(nWorker+1);

   pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo);
   pCsr->uc.pSorter = pSorter;
@@ -104463,7 +105282,7 @@ static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){
 */
 static MergeEngine *vdbeMergeEngineNew(int nReader){
   int N = 2;              /* Smallest power of two >= nReader */
-  int nByte;              /* Total bytes of space to allocate */
+  i64 nByte;              /* Total bytes of space to allocate */
   MergeEngine *pNew;      /* Pointer to allocated object to return */

   assert( nReader<=SORTER_MAX_MERGE_COUNT );
@@ -104715,6 +105534,10 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){
   p->u.pNext = 0;
   for(i=0; aSlot[i]; i++){
     p = vdbeSorterMerge(pTask, p, aSlot[i]);
+    /* ,--Each aSlot[] holds twice as much as the previous. So we cannot use
+    ** |  up all 64 aSlots[] with only a 64-bit address space.
+    ** v */
+    assert( i<ArraySize(aSlot) );
   int eNewExprOp = TK_COLUMN;       /* New value for pExpr->op on success */
   Table *pTab = 0;                  /* Table holding the row */
-  Column *pCol;                     /* A column of pTab */
   ExprList *pFJMatch = 0;           /* Matches for FULL JOIN ..
USING */
   const char *zCol = pRight->u.zToken;
@@ -107557,7 +108379,6 @@ static int lookupName(

   if( pSrcList ){
     for(i=0, pItem=pSrcList->a; i<pSrcList->nSrc; i++, pItem++){
-      u8 hCol;
       pTab = pItem->pSTab;
       assert( pTab!=0 && pTab->zName!=0 );
       assert( pTab->nCol>0 || pParse->nErr );
@@ -107645,43 +108466,38 @@
           sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab);
         }
       }
-      hCol = sqlite3StrIHash(zCol);
-      for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){
-        if( pCol->hName==hCol
-         && sqlite3StrICmp(pCol->zCnName, zCol)==0
-        ){
-          if( cnt>0 ){
-            if( pItem->fg.isUsing==0
-             || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
-            ){
-              /* Two or more tables have the same column name which is
-              ** not joined by USING.  This is an error.  Signal as much
-              ** by clearing pFJMatch and letting cnt go above 1. */
-              sqlite3ExprListDelete(db, pFJMatch);
-              pFJMatch = 0;
-            }else
-            if( (pItem->fg.jointype & JT_RIGHT)==0 ){
-              /* An INNER or LEFT JOIN.  Use the left-most table */
-              continue;
-            }else
-            if( (pItem->fg.jointype & JT_LEFT)==0 ){
-              /* A RIGHT JOIN.  Use the right-most table */
-              cnt = 0;
-              sqlite3ExprListDelete(db, pFJMatch);
-              pFJMatch = 0;
-            }else{
-              /* For a FULL JOIN, we must construct a coalesce() func */
-              extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
-            }
-          }
-          cnt++;
-          pMatch = pItem;
-          /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
-          pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j;
-          if( pItem->fg.isNestedFrom ){
-            sqlite3SrcItemColumnUsed(pItem, j);
+      j = sqlite3ColumnIndex(pTab, zCol);
+      if( j>=0 ){
+        if( cnt>0 ){
+          if( pItem->fg.isUsing==0
+           || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
+          ){
+            /* Two or more tables have the same column name which is
+            ** not joined by USING.  This is an error.  Signal as much
+            ** by clearing pFJMatch and letting cnt go above 1. */
+            sqlite3ExprListDelete(db, pFJMatch);
+            pFJMatch = 0;
+          }else
+          if( (pItem->fg.jointype & JT_RIGHT)==0 ){
+            /* An INNER or LEFT JOIN.  Use the left-most table */
+            continue;
+          }else
+          if( (pItem->fg.jointype & JT_LEFT)==0 ){
+            /* A RIGHT JOIN.  Use the right-most table */
+            cnt = 0;
+            sqlite3ExprListDelete(db, pFJMatch);
+            pFJMatch = 0;
+          }else{
+            /* For a FULL JOIN, we must construct a coalesce() func */
+            extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
          }
-          break;
+        }
+        cnt++;
+        pMatch = pItem;
+        /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
+        pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j;
+        if( pItem->fg.isNestedFrom ){
+          sqlite3SrcItemColumnUsed(pItem, j);
         }
       }
       if( 0==cnt && VisibleRowid(pTab) ){
@@ -107771,23 +108587,18 @@

     if( pTab ){
       int iCol;
-      u8 hCol = sqlite3StrIHash(zCol);
       pSchema = pTab->pSchema;
       cntTab++;
-      for(iCol=0, pCol=pTab->aCol; iCol<pTab->nCol; iCol++, pCol++){
-        if( pCol->hName==hCol
-         && sqlite3StrICmp(pCol->zCnName, zCol)==0
-        ){
-          if( iCol==pTab->iPKey ){
-            iCol = -1;
-          }
-          break;
+      iCol = sqlite3ColumnIndex(pTab, zCol);
+      if( iCol>=0 ){
+        if( pTab->iPKey==iCol ) iCol = -1;
+      }else{
+        if( sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){
+          iCol = -1;
+        }else{
+          iCol = pTab->nCol;
         }
       }
-      if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){
-        /* IMP: R-51414-32910 */
-        iCol = -1;
-      }
       if( iCol<pTab->nCol ){
         cnt++;
         pMatch = 0;
@@ -108426,13 +109237,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
       ** sqlite_version() that might change over time cannot be used
       ** in an index or generated column.  Curiously, they can be used
       ** in a CHECK constraint.  SQLServer, MySQL, and PostgreSQL all
-      ** all this.
*/ sqlite3ResolveNotValid(pParse, pNC, "non-deterministic functions", NC_IdxExpr|NC_PartIdx|NC_GenCol, 0, pExpr); }else{ assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */ pExpr->op2 = pNC->ncFlags & NC_SelfRef; - if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); } if( (pDef->funcFlags & SQLITE_FUNC_INTERNAL)!=0 && pParse->nested==0 @@ -108448,6 +109258,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ if( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0 && !IN_RENAME_OBJECT ){ + if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); sqlite3ExprFunctionUsable(pParse, pExpr, pDef); } } @@ -109501,20 +110312,22 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( Expr *pExpr, /* Expression to resolve. May be NULL. */ ExprList *pList /* Expression list to resolve. May be NULL. */ ){ - SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ + SrcList *pSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ int rc; + u8 srcSpace[SZ_SRCLIST_1]; /* Memory space for the fake SrcList */ assert( type==0 || pTab!=0 ); assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr || type==NC_GenCol || pTab==0 ); memset(&sNC, 0, sizeof(sNC)); - memset(&sSrc, 0, sizeof(sSrc)); + pSrc = (SrcList*)srcSpace; + memset(pSrc, 0, SZ_SRCLIST_1); if( pTab ){ - sSrc.nSrc = 1; - sSrc.a[0].zName = pTab->zName; - sSrc.a[0].pSTab = pTab; - sSrc.a[0].iCursor = -1; + pSrc->nSrc = 1; + pSrc->a[0].zName = pTab->zName; + pSrc->a[0].pSTab = pTab; + pSrc->a[0].iCursor = -1; if( pTab->pSchema!=pParse->db->aDb[1].pSchema ){ /* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP ** schema elements */ @@ -109522,7 +110335,7 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( } } sNC.pParse = pParse; - sNC.pSrcList = &sSrc; + sNC.pSrcList = pSrc; sNC.ncFlags = type | NC_IsDDL; if( (rc = sqlite3ResolveExprNames(&sNC, pExpr))!=SQLITE_OK ) return rc; if( pList ) rc = sqlite3ResolveExprListNames(&sNC, pList); @@ -109606,7 +110419,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr ); } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && pExpr->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(pExpr) ); return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); } @@ -109799,7 +110614,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ p = p->pLeft; continue; } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && p->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(p) ); p = p->x.pList->a[0].pExpr; continue; @@ -110673,7 +111490,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ return pLeft; }else{ u32 f = pLeft->flags | pRight->flags; - if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse|EP_HasFunc))==EP_IsFalse && !IN_RENAME_OBJECT ){ sqlite3ExprDeferredDelete(pParse, pLeft); @@ -111271,7 +112088,7 @@ static Expr *exprDup( SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p){ With *pRet = 0; if( p ){ - sqlite3_int64 nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1); + sqlite3_int64 nByte = SZ_WITH(p->nCte); pRet = sqlite3DbMallocZero(db, nByte); if( pRet ){ int i; @@ -111382,7 +112199,6 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int } pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName); pItem->fg = pOldItem->fg; - pItem->fg.done = 0; pItem->u = 
pOldItem->u;
   }
   return pNew;
 }
@@ -111399,11 +112215,9 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int
 SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int flags){
   SrcList *pNew;
   int i;
-  int nByte;
   assert( db!=0 );
   if( p==0 ) return 0;
-  nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0);
-  pNew = sqlite3DbMallocRawNN(db, nByte );
+  pNew = sqlite3DbMallocRawNN(db, SZ_SRCLIST(p->nSrc) );
   if( pNew==0 ) return 0;
   pNew->nSrc = pNew->nAlloc = p->nSrc;
   for(i=0; i<p->nSrc; i++){
@@ -111465,7 +112279,7 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){
   int i;
   assert( db!=0 );
   if( p==0 ) return 0;
-  pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew)+(p->nId-1)*sizeof(p->a[0]) );
+  pNew = sqlite3DbMallocRawNN(db, SZ_IDLIST(p->nId));
   if( pNew==0 ) return 0;
   pNew->nId = p->nId;
   for(i=0; i<p->nId; i++){
@@ -111497,7 +112311,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int fla
   pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags);
   pNew->iLimit = 0;
   pNew->iOffset = 0;
-  pNew->selFlags = p->selFlags & ~SF_UsesEphemeral;
+  pNew->selFlags = p->selFlags & ~(u32)SF_UsesEphemeral;
   pNew->addrOpenEphm[0] = -1;
   pNew->addrOpenEphm[1] = -1;
   pNew->nSelectRow = p->nSelectRow;
@@ -111549,7 +112363,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendNew(
   struct ExprList_item *pItem;
   ExprList *pList;

-  pList = sqlite3DbMallocRawNN(db, sizeof(ExprList)+sizeof(pList->a[0])*4 );
+  pList = sqlite3DbMallocRawNN(db, SZ_EXPRLIST(4));
   if( pList==0 ){
     sqlite3ExprDelete(db, pExpr);
     return 0;
   }
@@ -111569,8 +112383,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendGrow(
   struct ExprList_item *pItem;
   ExprList *pNew;
   pList->nAlloc *= 2;
-  pNew = sqlite3DbRealloc(db, pList,
-             sizeof(*pList)+(pList->nAlloc-1)*sizeof(pList->a[0]));
+  pNew = sqlite3DbRealloc(db, pList, SZ_EXPRLIST(pList->nAlloc));
   if( pNew==0 ){
     sqlite3ExprListDelete(db, pList);
     sqlite3ExprDelete(db, pExpr);
@@ -112499,13 +113312,7 @@ SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){
   int ii;
   assert( VisibleRowid(pTab) );
   for(ii=0; ii<ArraySize(azOpt); ii++){
-    int iCol;
-    for(iCol=0; iCol<pTab->nCol; iCol++){
-      if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break;
-    }
-    if( iCol==pTab->nCol ){
-      return azOpt[ii];
-    }
+    if( sqlite3ColumnIndex(pTab, azOpt[ii])<0 ) return azOpt[ii];
   }
   return 0;
 }
@@ -112909,7 +113716,7 @@ static char *exprINAffinity(Parse *pParse, const Expr *pExpr){
   char *zRet;

   assert( pExpr->op==TK_IN );
-  zRet = sqlite3DbMallocRaw(pParse->db, nVal+1);
+  zRet = sqlite3DbMallocRaw(pParse->db, 1+(i64)nVal);
   if( zRet ){
     int i;
     for(i=0; idb, pCopy);
     sqlite3DbFree(pParse->db, dest.zAffSdst);
     if( addrBloom ){
+      /* Remember that location of the Bloom filter in the P3 operand
+      ** of the OP_Once that began this subroutine.
tag-202407032019 */ sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; if( dest.iSDParm2==0 ){ - sqlite3VdbeChangeToNoop(v, addrBloom); - }else{ - sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + /* If the Bloom filter won't actually be used, keep it small */ + sqlite3VdbeGetOp(v, addrBloom)->p1 = 10; } } if( rc ){ @@ -113620,7 +114428,7 @@ static void sqlite3ExprCodeIN( if( ExprHasProperty(pExpr, EP_Subrtn) ){ const VdbeOp *pOp = sqlite3VdbeGetOp(v, pExpr->y.sub.iAddr); assert( pOp->opcode==OP_Once || pParse->nErr ); - if( pOp->opcode==OP_Once && pOp->p3>0 ){ + if( pOp->opcode==OP_Once && pOp->p3>0 ){ /* tag-202407032019 */ assert( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ); sqlite3VdbeAddOp4Int(v, OP_Filter, pOp->p3, destIfFalse, rLhs, nVector); VdbeCoverage(v); @@ -114212,7 +115020,7 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup( /* -** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This +** Expression pExpr is guaranteed to be a TK_COLUMN or equivalent. This ** function checks the Parse.pIdxPartExpr list to see if this column ** can be replaced with a constant value. If so, it generates code to ** put the constant value in a register (ideally, but not necessarily, @@ -114436,6 +115244,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeLoadString(v, target, pExpr->u.zToken); return target; } + case TK_NULLS: { + /* Set a range of registers to NULL. pExpr->y.nReg registers starting + ** with target */ + sqlite3VdbeAddOp3(v, OP_Null, 0, target, target + pExpr->y.nReg - 1); + return target; + } default: { /* Make NULL the default case so that if a bug causes an illegal ** Expr node to be passed into this function, it will be handled @@ -115120,6 +115934,25 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce( return regDest; } +/* +** Make arrangements to invoke OP_Null on a range of registers +** during initialization. +*/ +SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3ExprNullRegisterRange( + Parse *pParse, /* Parsing context */ + int iReg, /* First register to set to NULL */ + int nReg /* Number of sequential registers to NULL out */ +){ + u8 okConstFactor = pParse->okConstFactor; + Expr t; + memset(&t, 0, sizeof(t)); + t.op = TK_NULLS; + t.y.nReg = nReg; + pParse->okConstFactor = 1; + sqlite3ExprCodeRunJustOnce(pParse, &t, iReg); + pParse->okConstFactor = okConstFactor; +} + /* ** Generate code to evaluate an expression and store the results ** into a register. 
Return the register number where the results
@@ -115469,11 +116302,11 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
      assert( TK_ISNULL==OP_IsNull );   testcase( op==TK_ISNULL );
      assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
-      sqlite3VdbeTypeofColumn(v, r1);
+      assert( regFree1==0 || regFree1==r1 );
+      if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1);
      sqlite3VdbeAddOp2(v, op, r1, dest);
      VdbeCoverageIf(v, op==TK_ISNULL);
      VdbeCoverageIf(v, op==TK_NOTNULL);
-      testcase( regFree1==0 );
      break;
    }
    case TK_BETWEEN: {
@@ -115644,11 +116477,11 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
    case TK_ISNULL:
    case TK_NOTNULL: {
      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
-      sqlite3VdbeTypeofColumn(v, r1);
+      assert( regFree1==0 || regFree1==r1 );
+      if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1);
      sqlite3VdbeAddOp2(v, op, r1, dest);
      testcase( op==TK_ISNULL );   VdbeCoverageIf(v, op==TK_ISNULL);
      testcase( op==TK_NOTNULL );  VdbeCoverageIf(v, op==TK_NOTNULL);
-      testcase( regFree1==0 );
      break;
    }
    case TK_BETWEEN: {
@@ -116548,7 +117381,9 @@ static void findOrCreateAggInfoColumn(
 ){
   struct AggInfo_col *pCol;
   int k;
+  int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];

+  assert( mxTerm <= SMXV(i16) );
   assert( pAggInfo->iFirstReg==0 );
   pCol = pAggInfo->aCol;
   for(k=0; k<pAggInfo->nColumn; k++, pCol++){
@@ -116566,6 +117401,10 @@ static void findOrCreateAggInfoColumn(
     assert( pParse->db->mallocFailed );
     return;
   }
+  if( k>mxTerm ){
+    sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm);
+    k = mxTerm;
+  }
   pCol = &pAggInfo->aCol[k];
   assert( ExprUseYTab(pExpr) );
   pCol->pTab = pExpr->y.pTab;
@@ -116599,6 +117438,7 @@ static void findOrCreateAggInfoColumn(
   if( pExpr->op==TK_COLUMN ){
     pExpr->op = TK_AGG_COLUMN;
   }
+  assert( k <= SMXV(pExpr->iAgg) );
   pExpr->iAgg = (i16)k;
 }

@@ -116683,13 +117523,19 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
        ** function that is already in the pAggInfo structure
        */
        struct AggInfo_func *pItem = pAggInfo->aFunc;
+        int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN];
+        assert( mxTerm <= SMXV(i16) );
        for(i=0; i<pAggInfo->nFunc; i++, pItem++){
          if( NEVER(pItem->pFExpr==pExpr) ) break;
          if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){
            break;
          }
        }
-        if( i>=pAggInfo->nFunc ){
+        if( i>mxTerm ){
+          sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm);
+          i = mxTerm;
+          assert( i<pAggInfo->nFunc );
+        }else if( i>=pAggInfo->nFunc ){
          /* pExpr is original. Make a new entry in pAggInfo->aFunc[] */
          u8 enc = ENC(pParse->db);
@@ -116743,6 +117589,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
        */
        assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
        ExprSetVVAProperty(pExpr, EP_NoReduce);
+        assert( i <= SMXV(pExpr->iAgg) );
        pExpr->iAgg = (i16)i;
        pExpr->pAggInfo = pAggInfo;
        return WRC_Prune;
      }
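The aggregate-term hunks above exist because AggInfo column and function indexes are eventually narrowed into the 16-bit Expr.iAgg field, so the term count is now clamped to SQLITE_LIMIT_COLUMN before the cast instead of silently wrapping. A minimal standalone sketch of the invariant being asserted; the SMXV definition below is a stand-in for the amalgamation's helper, not a verbatim copy:

    #include <assert.h>
    #include <stdint.h>

    typedef int16_t i16;

    /* Largest value representable in signed integer type T.  Spelled
    ** differently in the amalgamation; only the bound matters here. */
    #define SMXV(T) ((T)(((uint64_t)1 << (sizeof(T)*8 - 1)) - 1))

    int main(void){
      int mxTerm = 2000;              /* default SQLITE_LIMIT_COLUMN value */
      assert( mxTerm <= SMXV(i16) );  /* the invariant the new asserts check */
      i16 iAgg = (i16)mxTerm;         /* worst case after the clamp: no wrap */
      (void)iAgg;
      return 0;
    }

With the clamp in place, exceeding the limit produces the explicit "more than %d aggregate terms" error rather than a truncated index.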
@@ -117453,13 +118300,13 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
  assert( pNew->nCol>0 );
  nAlloc = (((pNew->nCol-1)/8)*8)+8;
  assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 );
-  pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc);
+  pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*(u32)nAlloc);
  pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName);
  if( !pNew->aCol || !pNew->zName ){
    assert( db->mallocFailed );
    goto exit_begin_add_column;
  }
-  memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol);
+  memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*(size_t)pNew->nCol);
  for(i=0; i<pNew->nCol; i++){
    Column *pCol = &pNew->aCol[i];
    pCol->zCnName = sqlite3DbStrDup(db, pCol->zCnName);
@@ -117554,10 +118401,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn(
  ** altered.  Set iCol to be the index of the column being renamed */
  zOld = sqlite3NameFromToken(db, pOld);
  if( !zOld ) goto exit_rename_column;
-  for(iCol=0; iCol<pTab->nCol; iCol++){
-    if( 0==sqlite3StrICmp(pTab->aCol[iCol].zCnName, zOld) ) break;
-  }
-  if( iCol==pTab->nCol ){
+  iCol = sqlite3ColumnIndex(pTab, zOld);
+  if( iCol<0 ){
    sqlite3ErrorMsg(pParse, "no such column: \"%T\"", pOld);
    goto exit_rename_column;
  }
@@ -118060,6 +118905,7 @@ static int renameParseSql(
  int bTemp                       /* True if SQL is from temp schema */
 ){
  int rc;
+  u64 flags;

  sqlite3ParseObjectInit(p, db);
  if( zSql==0 ){
    return SQLITE_CORRUPT_BKPT;
  }
  if( sqlite3StrNICmp(zSql,"CREATE ",7)!=0 ){
    return SQLITE_CORRUPT_BKPT;
  }
-  db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb);
+  if( bTemp ){
+    db->init.iDb = 1;
+  }else{
+    int iDb = sqlite3FindDbName(db, zDb);
+    assert( iDb>=0 && iDb<=0xff );
+    db->init.iDb = (u8)iDb;
+  }
  p->eParseMode = PARSE_MODE_RENAME;
  p->db = db;
  p->nQueryLoop = 1;
+  flags = db->flags;
+  testcase( (db->flags & SQLITE_Comments)==0 && strstr(zSql," /* ")!=0 );
+  db->flags |= SQLITE_Comments;
  rc = sqlite3RunParser(p, zSql);
+  db->flags = flags;
  if( db->mallocFailed ) rc = SQLITE_NOMEM;
  if( rc==SQLITE_OK
   && NEVER(p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0)
@@ -118135,10 +118991,11 @@ static int renameEditSql(
      nQuot = sqlite3Strlen30(zQuot)-1;
    }
-    assert( nQuot>=nNew );
-    zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1);
+    assert( nQuot>=nNew && nSql>=0 && nNew>=0 );
+    zOut = sqlite3DbMallocZero(db, (u64)nSql + pRename->nList*(u64)nQuot + 1);
  }else{
-    zOut = (char*)sqlite3DbMallocZero(db, (nSql*2+1) * 3);
+    assert( nSql>0 );
+    zOut = (char*)sqlite3DbMallocZero(db, (2*(u64)nSql + 1) * 3);
    if( zOut ){
      zBuf1 = &zOut[nSql*2+1];
      zBuf2 = &zOut[nSql*4+2];
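The renameEditSql() hunk above widens the output-buffer arithmetic to 64 bits before multiplying, and a later assert in this same function records the bound that makes narrowing back to int safe. A back-of-the-envelope check of that bound (this is an illustration of the arithmetic, not code from the amalgamation):

    #include <assert.h>
    #include <stdint.h>

    int main(void){
      int64_t bound = 0x15555554;  /* the limit asserted later in the diff */
      /* Widened arithmetic, as in the new sqlite3DbMallocZero() call: */
      assert( (2*bound + 1) * 3 <= INT32_MAX );     /* still fits at the bound */
      assert( (2*(bound+1) + 1) * 3 > INT32_MAX );  /* first size past it would
                                                    ** overflow 32-bit math */
      return 0;
    }

Below the bound, a plain int allocation request cannot wrap; above it, the diff's comment notes that the earlier malloc would already have failed.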
@@ -118150,16 +119007,17 @@ static int renameEditSql(
  ** with the new column name, or with single-quoted versions of themselves.
  ** All that remains is to construct and return the edited SQL string. */
  if( zOut ){
-    int nOut = nSql;
-    memcpy(zOut, zSql, nSql);
+    i64 nOut = nSql;
+    assert( nSql>0 );
+    memcpy(zOut, zSql, (size_t)nSql);
    while( pRename->pList ){
      int iOff;                   /* Offset of token to replace in zOut */
-      u32 nReplace;
+      i64 nReplace;
      const char *zReplace;
      RenameToken *pBest = renameColumnTokenNext(pRename);

      if( zNew ){
-        if( bQuote==0 && sqlite3IsIdChar(*pBest->t.z) ){
+        if( bQuote==0 && sqlite3IsIdChar(*(u8*)pBest->t.z) ){
          nReplace = nNew;
          zReplace = zNew;
        }else{
@@ -118177,14 +119035,15 @@ static int renameEditSql(
        memcpy(zBuf1, pBest->t.z, pBest->t.n);
        zBuf1[pBest->t.n] = 0;
        sqlite3Dequote(zBuf1);
-        sqlite3_snprintf(nSql*2, zBuf2, "%Q%s", zBuf1,
+        assert( nSql < 0x15555554 /* otherwise malloc would have failed */ );
+        sqlite3_snprintf((int)(nSql*2), zBuf2, "%Q%s", zBuf1,
            pBest->t.z[pBest->t.n]=='\'' ? " " : ""
        );
        zReplace = zBuf2;
        nReplace = sqlite3Strlen30(zReplace);
      }
-      iOff = pBest->t.z - zSql;
+      iOff = (int)(pBest->t.z - zSql);
      if( pBest->t.n!=nReplace ){
        memmove(&zOut[iOff + nReplace], &zOut[iOff + pBest->t.n],
            nOut - (iOff + pBest->t.n)
@@ -118210,11 +119069,12 @@ static int renameEditSql(
 ** Set all pEList->a[].fg.eEName fields in the expression-list to val.
 */
 static void renameSetENames(ExprList *pEList, int val){
+  assert( val==ENAME_NAME || val==ENAME_TAB || val==ENAME_SPAN );
  if( pEList ){
    int i;
    for(i=0; i<pEList->nExpr; i++){
      assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME );
-      pEList->a[i].fg.eEName = val;
+      pEList->a[i].fg.eEName = val&0x3;
    }
  }
 }
@@ -118471,7 +119331,7 @@ static void renameColumnFunc(
  if( sParse.pNewTable ){
    if( IsView(sParse.pNewTable) ){
      Select *pSelect = sParse.pNewTable->u.view.pSelect;
-      pSelect->selFlags &= ~SF_View;
+      pSelect->selFlags &= ~(u32)SF_View;
      sParse.rc = SQLITE_OK;
      sqlite3SelectPrep(&sParse, pSelect, 0);
      rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc);
@@ -118689,7 +119549,7 @@ static void renameTableFunc(
        sNC.pParse = &sParse;
        assert( pSelect->selFlags & SF_View );
-        pSelect->selFlags &= ~SF_View;
+        pSelect->selFlags &= ~(u32)SF_View;
        sqlite3SelectPrep(&sParse, pTab->u.view.pSelect, &sNC);
        if( sParse.nErr ){
          rc = sParse.rc;
@@ -118862,7 +119722,7 @@ static void renameQuotefixFunc(
    if( sParse.pNewTable ){
      if( IsView(sParse.pNewTable) ){
        Select *pSelect = sParse.pNewTable->u.view.pSelect;
-        pSelect->selFlags &= ~SF_View;
+        pSelect->selFlags &= ~(u32)SF_View;
        sParse.rc = SQLITE_OK;
        sqlite3SelectPrep(&sParse, pSelect, 0);
        rc = (db->mallocFailed ?
SQLITE_NOMEM : sParse.rc); @@ -118961,10 +119821,10 @@ static void renameTableTest( if( zDb && zInput ){ int rc; Parse sParse; - int flags = db->flags; + u64 flags = db->flags; if( bNoDQS ) db->flags &= ~(SQLITE_DqsDML|SQLITE_DqsDDL); rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); - db->flags |= (flags & (SQLITE_DqsDML|SQLITE_DqsDDL)); + db->flags = flags; if( rc==SQLITE_OK ){ if( isLegacy==0 && sParse.pNewTable && IsView(sParse.pNewTable) ){ NameContext sNC; @@ -119456,7 +120316,8 @@ static void openStatTable( sqlite3NestedParse(pParse, "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols ); - aRoot[i] = (u32)pParse->regRoot; + assert( pParse->isCreate || pParse->nErr ); + aRoot[i] = (u32)pParse->u1.cr.regRoot; aCreateTbl[i] = OPFLAG_P2ISREG; } }else{ @@ -119647,7 +120508,7 @@ static void statInit( int nCol; /* Number of columns in index being sampled */ int nKeyCol; /* Number of key columns */ int nColUp; /* nCol rounded up for alignment */ - int n; /* Bytes of space to allocate */ + i64 n; /* Bytes of space to allocate */ sqlite3 *db = sqlite3_context_db_handle(context); /* Database connection */ #ifdef SQLITE_ENABLE_STAT4 /* Maximum number of samples. 0 if STAT4 data is not collected */ @@ -119683,7 +120544,7 @@ static void statInit( p->db = db; p->nEst = sqlite3_value_int64(argv[2]); p->nRow = 0; - p->nLimit = sqlite3_value_int64(argv[3]); + p->nLimit = sqlite3_value_int(argv[3]); p->nCol = nCol; p->nKeyCol = nKeyCol; p->nSkipAhead = 0; @@ -120816,16 +121677,6 @@ static void decodeIntArray( while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } - - /* Set the bLowQual flag if the peak number of rows obtained - ** from a full equality match is so large that a full table scan - ** seems likely to be faster than using the index. 
-    */
-    if( aLog[0] > 66              /* Index has more than 100 rows */
-     && aLog[0] <= aLog[nOut-1]   /* And only a single value seen */
-    ){
-      pIndex->bLowQual = 1;
-    }
  }
 }
@@ -121421,7 +122272,7 @@ static void attachFunc(
    if( aNew==0 ) return;
    memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2);
  }else{
-    aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) );
+    aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(1+(i64)db->nDb));
    if( aNew==0 ) return;
  }
  db->aDb = aNew;
@@ -121492,6 +122343,13 @@ static void attachFunc(
  sqlite3BtreeEnterAll(db);
  db->init.iDb = 0;
  db->mDbFlags &= ~(DBFLAG_SchemaKnownOk);
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  if( db->setlkFlags & SQLITE_SETLK_BLOCK_ON_CONNECT ){
+    int val = 1;
+    sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pNew->pBt));
+    sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, &val);
+  }
+#endif
  if( !REOPEN_AS_MEMDB(db) ){
    rc = sqlite3Init(db, &zErrDyn);
  }
@@ -122214,6 +123072,7 @@ static SQLITE_NOINLINE void lockTable(
    }
  }

+  assert( pToplevel->nTableLock < 0x7fff0000 );
  nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1);
  pToplevel->aTableLock =
      sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes);
@@ -122314,10 +123173,12 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
       || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort));
  if( v ){
    if( pParse->bReturning ){
-      Returning *pReturning = pParse->u1.pReturning;
+      Returning *pReturning;
      int addrRewind;
      int reg;

+      assert( !pParse->isCreate );
+      pReturning = pParse->u1.d.pReturning;
      if( pReturning->nRetCol ){
        sqlite3VdbeAddOp0(v, OP_FkCheck);
        addrRewind =
@@ -122393,7 +123254,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
    }
    if( pParse->bReturning ){
-      Returning *pRet = pParse->u1.pReturning;
+      Returning *pRet;
+      assert( !pParse->isCreate );
+      pRet = pParse->u1.d.pReturning;
      if( pRet->nRetCol ){
        sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol);
      }
@@ -123208,10 +124071,16 @@ SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){
 ** find the (first) offset of that column in index pIdx.  Or return -1
 ** if column iCol is not used in index pIdx.
 */
-SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index *pIdx, i16 iCol){
+SQLITE_PRIVATE int sqlite3TableColumnToIndex(Index *pIdx, int iCol){
  int i;
+  i16 iCol16;
+  assert( iCol>=(-1) && iCol<=SQLITE_MAX_COLUMN );
+  assert( pIdx->nColumn<=SQLITE_MAX_COLUMN+1 );
+  iCol16 = iCol;
  for(i=0; i<pIdx->nColumn; i++){
-    if( iCol==pIdx->aiColumn[i] ) return i;
+    if( iCol16==pIdx->aiColumn[i] ){
+      return i;
+    }
  }
  return -1;
 }
@@ -123465,8 +124334,9 @@ SQLITE_PRIVATE void sqlite3StartTable(
  /* If the file format and encoding in the database have not been set,
  ** set them now.
  */
-  reg1 = pParse->regRowid = ++pParse->nMem;
-  reg2 = pParse->regRoot = ++pParse->nMem;
+  assert( pParse->isCreate );
+  reg1 = pParse->u1.cr.regRowid = ++pParse->nMem;
+  reg2 = pParse->u1.cr.regRoot = ++pParse->nMem;
  reg3 = ++pParse->nMem;
  sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT);
  sqlite3VdbeUsesBtree(v, iDb);
@@ -123481,8 +124351,8 @@ SQLITE_PRIVATE void sqlite3StartTable(
  ** The record created does not contain anything yet.  It will be replaced
  ** by the real entry in code generated at sqlite3EndTable().
  **
-  ** The rowid for the new entry is left in register pParse->regRowid.
-  ** The root page number of the new table is left in reg pParse->regRoot.
+  ** The rowid for the new entry is left in register pParse->u1.cr.regRowid.
+  ** The root page of the new table is left in reg pParse->u1.cr.regRoot.
  ** The rowid and root page number values are needed by the code that
  ** sqlite3EndTable will generate.
  */
@@ -123493,7 +124363,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
#endif
  {
    assert( !pParse->bReturning );
-    pParse->u1.addrCrTab =
+    pParse->u1.cr.addrCrTab =
       sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY);
  }
  sqlite3OpenSchemaTable(pParse, iDb);
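These sqlite3StartTable() hunks, together with many below, migrate the CREATE-time registers (regRowid, regRoot, addrCrTab, constraintName) and the RETURNING bookkeeping (pReturning) into a single discriminated union Parse.u1 keyed on Parse.isCreate; the added asserts document which arm is live at each use site. A stripped-down sketch of the shape this implies; field names follow the diff, everything else here is illustrative:

    #include <assert.h>

    typedef struct Returning Returning;   /* opaque for this sketch */
    typedef struct Token { const char *z; unsigned n; } Token;

    typedef struct Parse {
      unsigned char isCreate;      /* True while coding CREATE TABLE */
      union {
        struct {                   /* valid only when isCreate!=0 */
          int addrCrTab;           /* address of the OP_CreateBtree opcode */
          int regRowid;            /* rowid of the new sqlite_schema entry */
          int regRoot;             /* root page of the new table */
          Token constraintName;    /* name of the most recent constraint */
        } cr;
        struct {                   /* valid only when isCreate==0 */
          Returning *pReturning;   /* RETURNING clause bookkeeping */
        } d;
      } u1;
    } Parse;

    /* Accessor mirroring the asserts the diff adds at each use site. */
    static int createBtreeAddr(Parse *p){
      assert( p->isCreate );
      return p->u1.cr.addrCrTab;
    }

    int main(void){
      Parse p = {0};
      p.isCreate = 1;
      p.u1.cr.addrCrTab = 42;
      return createBtreeAddr(&p)==42 ? 0 : 1;
    }

Overlapping the two arms is safe because a statement is either a CREATE (cr) or something that may carry RETURNING (d), never both.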
@@ -123571,7 +124441,8 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
    sqlite3ExprListDelete(db, pList);
    return;
  }
-  pParse->u1.pReturning = pRet;
+  assert( !pParse->isCreate );
+  pParse->u1.d.pReturning = pRet;
  pRet->pParse = pParse;
  pRet->pReturnEL = pList;
  sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet);
@@ -123613,7 +124484,6 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
  char *zType;
  Column *pCol;
  sqlite3 *db = pParse->db;
-  u8 hName;
  Column *aNew;
  u8 eType = COLTYPE_CUSTOM;
  u8 szEst = 1;
@@ -123667,13 +124537,10 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
  memcpy(z, sName.z, sName.n);
  z[sName.n] = 0;
  sqlite3Dequote(z);
-  hName = sqlite3StrIHash(z);
-  for(i=0; i<p->nCol; i++){
-    if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zCnName)==0 ){
-      sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
-      sqlite3DbFree(db, z);
-      return;
-    }
+  if( p->nCol && sqlite3ColumnIndex(p, z)>=0 ){
+    sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
+    sqlite3DbFree(db, z);
+    return;
  }
  aNew = sqlite3DbRealloc(db,p->aCol,((i64)p->nCol+1)*sizeof(p->aCol[0]));
  if( aNew==0 ){
@@ -123684,7 +124551,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
  pCol = &p->aCol[p->nCol];
  memset(pCol, 0, sizeof(p->aCol[0]));
  pCol->zCnName = z;
-  pCol->hName = hName;
+  pCol->hName = sqlite3StrIHash(z);
  sqlite3ColumnPropertiesFromName(p, pCol);

  if( sType.n==0 ){
@@ -123708,9 +124575,14 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){
    pCol->affinity = sqlite3AffinityType(zType, pCol);
    pCol->colFlags |= COLFLAG_HASTYPE;
  }
+  if( p->nCol<=0xff ){
+    u8 h = pCol->hName % sizeof(p->aHx);
+    p->aHx[h] = p->nCol;
+  }
  p->nCol++;
  p->nNVCol++;
-  pParse->constraintName.n = 0;
+  assert( pParse->isCreate );
+  pParse->u1.cr.constraintName.n = 0;
 }

 /*
@@ -123974,15 +124846,11 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
      assert( pCExpr!=0 );
      sqlite3StringToId(pCExpr);
      if( pCExpr->op==TK_ID ){
-        const char *zCName;
        assert( !ExprHasProperty(pCExpr, EP_IntValue) );
-        zCName = pCExpr->u.zToken;
-        for(iCol=0; iCol<pTab->nCol; iCol++){
-          if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zCnName)==0 ){
-            pCol = &pTab->aCol[iCol];
-            makeColumnPartOfPrimaryKey(pParse, pCol);
-            break;
-          }
+        iCol = sqlite3ColumnIndex(pTab, pCExpr->u.zToken);
+        if( iCol>=0 ){
+          pCol = &pTab->aCol[iCol];
+          makeColumnPartOfPrimaryKey(pParse, pCol);
        }
      }
    }
@@ -124034,8 +124902,10 @@ SQLITE_PRIVATE void sqlite3AddCheckConstraint(
   && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt)
  ){
    pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr);
-    if( pParse->constraintName.n ){
-      sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1);
+    assert( pParse->isCreate );
+    if( pParse->u1.cr.constraintName.n ){
+      sqlite3ExprListSetName(pParse, pTab->pCheck,
+                             &pParse->u1.cr.constraintName, 1);
    }else{
      Token t;
      for(zStart++; sqlite3Isspace(zStart[0]); zStart++){}
@@ -124230,7 +125100,8 @@ static void identPut(char *z, int *pIdx, char *zSignedIdent){
 ** from sqliteMalloc() and must be freed by the calling function.
 */
 static char *createTableStmt(sqlite3 *db, Table *p){
-  int i, k, n;
+  int i, k, len;
+  i64 n;
  char *zStmt;
  char *zSep, *zSep2, *zEnd;
  Column *pCol;
@@ -124254,8 +125125,9 @@ static char *createTableStmt(sqlite3 *db, Table *p){
    sqlite3OomFault(db);
    return 0;
  }
-  sqlite3_snprintf(n, zStmt, "CREATE TABLE ");
-  k = sqlite3Strlen30(zStmt);
+  assert( n>14 && n<=0x7fffffff );
+  memcpy(zStmt, "CREATE TABLE ", 13);
+  k = 13;
  identPut(zStmt, &k, p->zName);
  zStmt[k++] = '(';
  for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){
        /* SQLITE_AFF_REAL    */ " REAL",
        /* SQLITE_AFF_FLEXNUM */ " NUM",
    };
-    int len;
    const char *zType;

-    sqlite3_snprintf(n-k, &zStmt[k], zSep);
-    k += sqlite3Strlen30(&zStmt[k]);
+    len = sqlite3Strlen30(zSep);
+    assert( k+len<n );
+    memcpy(&zStmt[k], zSep, len+1);
+    k += len;
    zSep = zSep2;
    identPut(zStmt, &k, pCol->zCnName);
+    assert( k<n );
    assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 );
    assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) );
    testcase( pCol->affinity==SQLITE_AFF_BLOB );
@@ -124288,11 +125162,14 @@ static char *createTableStmt(sqlite3 *db, Table *p){
    assert( pCol->affinity==SQLITE_AFF_BLOB
            || pCol->affinity==SQLITE_AFF_FLEXNUM
            || pCol->affinity==sqlite3AffinityType(zType, 0) );
+    assert( k+lennColumn>=N ) return SQLITE_OK;
+  db = pParse->db;
+  assert( N>0 );
+  assert( N <= SQLITE_MAX_COLUMN*2 /* tag-20250221-1 */ );
+  testcase( N==2*pParse->db->aLimit[SQLITE_LIMIT_COLUMN] );
  assert( pIdx->isResized==0 );
-  nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*N;
+  nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*(u64)N;
  zExtra = sqlite3DbMallocZero(db, nByte);
  if( zExtra==0 ) return SQLITE_NOMEM_BKPT;
  memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn);
@@ -124319,7 +125201,7 @@ static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){
  zExtra += sizeof(i16)*N;
  memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn);
  pIdx->aSortOrder = (u8*)zExtra;
-  pIdx->nColumn = N;
+  pIdx->nColumn = (u16)N;   /* See tag-20250221-1 above for proof of safety */
  pIdx->isResized = 1;
  return SQLITE_OK;
 }
@@ -124485,9 +125367,9 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
  ** into BTREE_BLOBKEY.
  */
  assert( !pParse->bReturning );
-  if( pParse->u1.addrCrTab ){
+  if( pParse->u1.cr.addrCrTab ){
    assert( v );
-    sqlite3VdbeChangeP3(v, pParse->u1.addrCrTab, BTREE_BLOBKEY);
+    sqlite3VdbeChangeP3(v, pParse->u1.cr.addrCrTab, BTREE_BLOBKEY);
  }

  /* Locate the PRIMARY KEY index.  Or, if this table was originally
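The resizeIndexObject() changes above lean on the tag-20250221-1 invariant: N never exceeds 2*SQLITE_LIMIT_COLUMN, and since SQLite caps SQLITE_MAX_COLUMN at 32767, the narrowing cast (u16)N can never truncate. A standalone restatement of that arithmetic; the cap value is SQLite's documented compile-time maximum, the rest is illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define MAX_COLUMN_CAP 32767   /* hard upper bound on SQLITE_MAX_COLUMN */

    int main(void){
      uint32_t N = 2u * MAX_COLUMN_CAP;   /* worst case allowed by the assert */
      assert( N <= UINT16_MAX );          /* 65534 <= 65535: (u16)N is exact */
      uint16_t nColumn = (uint16_t)N;
      assert( nColumn == N );
      return 0;
    }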
@@ -124573,14 +125455,14 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
      pIdx->nColumn = pIdx->nKeyCol;
      continue;
    }
-    if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return;
+    if( resizeIndexObject(pParse, pIdx, pIdx->nKeyCol+n) ) return;
    for(i=0, j=pIdx->nKeyCol; i<nPk; i++){
      if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){
        testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) );
        pIdx->aiColumn[j] = pPk->aiColumn[i];
        pIdx->azColl[j] = pPk->azColl[i];
        if( pPk->aSortOrder[i] ){
-          /* See ticket https://www.sqlite.org/src/info/bba7b69f9849b5bf */
+          /* See ticket https://sqlite.org/src/info/bba7b69f9849b5bf */
          pIdx->bAscKeyBug = 1;
        }
        j++;
@@ -124597,7 +125479,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
    if( !hasColumn(pPk->aiColumn, nPk, i)
     && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) nExtra++;
  }
-  if( resizeIndexObject(db, pPk, nPk+nExtra) ) return;
+  if( resizeIndexObject(pParse, pPk, nPk+nExtra) ) return;
  for(i=0, j=nPk; i<pTab->nCol; i++){
    if( !hasColumn(pPk->aiColumn, j, i)
     && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0
@@ -124927,7 +125809,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
  /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT
  ** statement to populate the new table. The root-page number for the
-  ** new table is in register pParse->regRoot.
+  ** new table is in register pParse->u1.cr.regRoot.
  **
  ** Once the SELECT has been coded by sqlite3Select(), it is in a
  ** suitable state to query for the column names and types to be used
@@ -124958,7 +125840,8 @@ SQLITE_PRIVATE void sqlite3EndTable(
    regRec = ++pParse->nMem;
    regRowid = ++pParse->nMem;
    sqlite3MayAbort(pParse);
-    sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb);
+    assert( pParse->isCreate );
+    sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->u1.cr.regRoot, iDb);
    sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG);
    addrTop = sqlite3VdbeCurrentAddr(v) + 1;
    sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
@@ -125003,6 +125886,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
    ** schema table.  We just need to update that slot with all
    ** the information we've collected.
    */
+    assert( pParse->isCreate );
    sqlite3NestedParse(pParse,
      "UPDATE %Q." LEGACY_SCHEMA_TABLE
      " SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q"
      " WHERE rowid=#%d",
      db->aDb[iDb].zDbSName,
      zType,
      p->zName,
      p->zName,
-      pParse->regRoot,
+      pParse->u1.cr.regRoot,
      zStmt,
-      pParse->regRowid
+      pParse->u1.cr.regRowid
    );
    sqlite3DbFree(db, zStmt);
    sqlite3ChangeCookie(pParse, iDb);
@@ -125753,7 +126637,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey(
  }else{
    nCol = pFromCol->nExpr;
  }
-  nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1;
+  nByte = SZ_FKEY(nCol) + pTo->n + 1;
  if( pToCol ){
    for(i=0; i<pToCol->nExpr; i++){
      nByte += sqlite3Strlen30(pToCol->a[i].zEName) + 1;
    }
  }
@@ -125955,7 +126839,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
    ** not work for UNIQUE constraint indexes on WITHOUT ROWID tables
    ** with DESC primary keys, since those indexes have there keys in
    ** a different order from the main table.
- ** See ticket: https://www.sqlite.org/src/info/bba7b69f9849b5bf + ** See ticket: https://sqlite.org/src/info/bba7b69f9849b5bf */ sqlite3VdbeAddOp1(v, OP_SeekEnd, iIdx); } @@ -125979,13 +126863,14 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ */ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( sqlite3 *db, /* Database connection */ - i16 nCol, /* Total number of columns in the index */ + int nCol, /* Total number of columns in the index */ int nExtra, /* Number of bytes of extra space to alloc */ char **ppExtra /* Pointer to the "extra" space */ ){ Index *p; /* Allocated index object */ - int nByte; /* Bytes of space for Index object + arrays */ + i64 nByte; /* Bytes of space for Index object + arrays */ + assert( nCol <= 2*db->aLimit[SQLITE_LIMIT_COLUMN] ); nByte = ROUND8(sizeof(Index)) + /* Index structure */ ROUND8(sizeof(char*)*nCol) + /* Index.azColl */ ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */ @@ -125998,8 +126883,9 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1); p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol; p->aSortOrder = (u8*)pExtra; - p->nColumn = nCol; - p->nKeyCol = nCol - 1; + assert( nCol>0 ); + p->nColumn = (u16)nCol; + p->nKeyCol = (u16)(nCol - 1); *ppExtra = ((char*)p) + nByte; } return p; @@ -126810,12 +127696,11 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token * sqlite3 *db = pParse->db; int i; if( pList==0 ){ - pList = sqlite3DbMallocZero(db, sizeof(IdList) ); + pList = sqlite3DbMallocZero(db, SZ_IDLIST(1)); if( pList==0 ) return 0; }else{ IdList *pNew; - pNew = sqlite3DbRealloc(db, pList, - sizeof(IdList) + pList->nId*sizeof(pList->a)); + pNew = sqlite3DbRealloc(db, pList, SZ_IDLIST(pList->nId+1)); if( pNew==0 ){ sqlite3IdListDelete(db, pList); return 0; @@ -126914,8 +127799,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge( return 0; } if( nAlloc>SQLITE_MAX_SRCLIST ) nAlloc = SQLITE_MAX_SRCLIST; - pNew = sqlite3DbRealloc(db, pSrc, - sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); + pNew = sqlite3DbRealloc(db, pSrc, SZ_SRCLIST(nAlloc)); if( pNew==0 ){ assert( db->mallocFailed ); return 0; @@ -126990,7 +127874,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( assert( pParse->db!=0 ); db = pParse->db; if( pList==0 ){ - pList = sqlite3DbMallocRawNN(pParse->db, sizeof(SrcList) ); + pList = sqlite3DbMallocRawNN(pParse->db, SZ_SRCLIST(1)); if( pList==0 ) return 0; pList->nAlloc = 1; pList->nSrc = 1; @@ -127876,10 +128760,9 @@ SQLITE_PRIVATE With *sqlite3WithAdd( } if( pWith ){ - sqlite3_int64 nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); - pNew = sqlite3DbRealloc(db, pWith, nByte); + pNew = sqlite3DbRealloc(db, pWith, SZ_WITH(pWith->nCte+1)); }else{ - pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); + pNew = sqlite3DbMallocZero(db, SZ_WITH(1)); } assert( (pNew!=0 && zName!=0) || db->mallocFailed ); @@ -129853,11 +130736,6 @@ static void substrFunc( i64 p1, p2; assert( argc==3 || argc==2 ); - if( sqlite3_value_type(argv[1])==SQLITE_NULL - || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) - ){ - return; - } p0type = sqlite3_value_type(argv[0]); p1 = sqlite3_value_int64(argv[1]); if( p0type==SQLITE_BLOB ){ @@ -129875,19 +130753,23 @@ static void substrFunc( } } } -#ifdef SQLITE_SUBSTR_COMPATIBILITY - /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as - ** as substr(X,1,N) - it returns the first N characters of X. 
This - ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] - ** from 2009-02-02 for compatibility of applications that exploited the - ** old buggy behavior. */ - if( p1==0 ) p1 = 1; /* */ -#endif if( argc==3 ){ p2 = sqlite3_value_int64(argv[2]); + if( p2==0 && sqlite3_value_type(argv[2])==SQLITE_NULL ) return; }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } + if( p1==0 ){ +#ifdef SQLITE_SUBSTR_COMPATIBILITY + /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as + ** as substr(X,1,N) - it returns the first N characters of X. This + ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] + ** from 2009-02-02 for compatibility of applications that exploited the + ** old buggy behavior. */ + p1 = 1; /* */ +#endif + if( sqlite3_value_type(argv[1])==SQLITE_NULL ) return; + } if( p1<0 ){ p1 += len; if( p1<0 ){ @@ -130588,7 +131470,7 @@ static const char hexdigits[] = { ** Append to pStr text that is the SQL literal representation of the ** value contained in pValue. */ -SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ +SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue, int bEscape){ /* As currently implemented, the string must be initially empty. ** we might relax this requirement in the future, but that will ** require enhancements to the implementation. */ @@ -130636,7 +131518,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } case SQLITE_TEXT: { const unsigned char *zArg = sqlite3_value_text(pValue); - sqlite3_str_appendf(pStr, "%Q", zArg); + sqlite3_str_appendf(pStr, bEscape ? "%#Q" : "%Q", zArg); break; } default: { @@ -130647,6 +131529,105 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } } +/* +** Return true if z[] begins with N hexadecimal digits, and write +** a decoding of those digits into *pVal. Or return false if any +** one of the first N characters in z[] is not a hexadecimal digit. +*/ +static int isNHex(const char *z, int N, u32 *pVal){ + int i; + int v = 0; + for(i=0; i0 ){ + memmove(&zOut[j], &zIn[i], n); + j += n; + i += n; + } + if( zIn[i+1]=='\\' ){ + i += 2; + zOut[j++] = '\\'; + }else if( sqlite3Isxdigit(zIn[i+1]) ){ + if( !isNHex(&zIn[i+1], 4, &v) ) goto unistr_error; + i += 5; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='+' ){ + if( !isNHex(&zIn[i+2], 6, &v) ) goto unistr_error; + i += 8; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='u' ){ + if( !isNHex(&zIn[i+2], 4, &v) ) goto unistr_error; + i += 6; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='U' ){ + if( !isNHex(&zIn[i+2], 8, &v) ) goto unistr_error; + i += 10; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else{ + goto unistr_error; + } + } + zOut[j] = 0; + sqlite3_result_text64(context, zOut, j, sqlite3_free, SQLITE_UTF8); + return; + +unistr_error: + sqlite3_free(zOut); + sqlite3_result_error(context, "invalid Unicode escape", -1); + return; +} + + /* ** Implementation of the QUOTE() function. ** @@ -130656,6 +131637,10 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ ** as needed. BLOBs are encoded as hexadecimal literals. Strings with ** embedded NUL characters cannot be represented as string literals in SQL ** and hence the returned string literal is truncated prior to the first NUL. 
+**
+** If sqlite3_user_data() is non-zero, then the UNISTR_QUOTE() function is
+** implemented instead.  The difference is that UNISTR_QUOTE() uses the
+** UNISTR() function to escape control characters.
 */
 static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
  sqlite3_str str;
  sqlite3 *db = sqlite3_context_db_handle(context);
  assert( argc==1 );
  UNUSED_PARAMETER(argc);
  sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]);
-  sqlite3QuoteValue(&str,argv[0]);
+  sqlite3QuoteValue(&str,argv[0],SQLITE_PTR_TO_INT(sqlite3_user_data(context)));
  sqlite3_result_text(context, sqlite3StrAccumFinish(&str), str.nChar,
                      SQLITE_DYNAMIC);
  if( str.accError!=SQLITE_OK ){
@@ -130918,7 +131903,7 @@ static void replaceFunc(
  assert( zRep==sqlite3_value_text(argv[2]) );
  nOut = nStr + 1;
  assert( nOut0 ){
+    if( sqlite3_value_type(argv[i])!=SQLITE_NULL ){
+      int k = sqlite3_value_bytes(argv[i]);
      const char *v = (const char*)sqlite3_value_text(argv[i]);
      if( v!=0 ){
        if( j>0 && nSep>0 ){
@@ -131314,7 +132299,7 @@ static void kahanBabuskaNeumaierInit(
 ** that it returns NULL if it sums over no inputs.  TOTAL returns
 ** 0.0 in that case.  In addition, TOTAL always returns a float where
 ** SUM might return an integer if it never encounters a floating point
-** value.  TOTAL never fails, but SUM might through an exception if
+** value.  TOTAL never fails, but SUM might throw an exception if
 ** it overflows an integer.
 */
 static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
@@ -132234,7 +133219,9 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
    DFUNCTION(sqlite_version,    0, 0, 0, versionFunc       ),
    DFUNCTION(sqlite_source_id,  0, 0, 0, sourceidFunc      ),
    FUNCTION(sqlite_log,         2, 0, 0, errlogFunc        ),
+    FUNCTION(unistr,             1, 0, 0, unistrFunc        ),
    FUNCTION(quote,              1, 0, 0, quoteFunc         ),
+    FUNCTION(unistr_quote,       1, 1, 0, quoteFunc         ),
    VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid),
    VFUNCTION(changes,           0, 0, 0, changes           ),
    VFUNCTION(total_changes,     0, 0, 0, total_changes     ),
@@ -134521,7 +135508,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList
      f = (f & pLeft->selFlags);
    }
    pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0);
-    pLeft->selFlags &= ~SF_MultiValue;
+    pLeft->selFlags &= ~(u32)SF_MultiValue;
    if( pSelect ){
      pSelect->op = TK_ALL;
      pSelect->pPrior = pLeft;
@@ -134903,28 +135890,22 @@ SQLITE_PRIVATE void sqlite3Insert(
    aTabColMap = sqlite3DbMallocZero(db, pTab->nCol*sizeof(int));
    if( aTabColMap==0 ) goto insert_cleanup;
    for(i=0; i<pColumn->nId; i++){
-      const char *zCName = pColumn->a[i].zName;
-      u8 hName = sqlite3StrIHash(zCName);
-      for(j=0; j<pTab->nCol; j++){
-        if( pTab->aCol[j].hName!=hName ) continue;
-        if( sqlite3StrICmp(zCName, pTab->aCol[j].zCnName)==0 ){
-          if( aTabColMap[j]==0 ) aTabColMap[j] = i+1;
-          if( i!=j ) bIdListInOrder = 0;
-          if( j==pTab->iPKey ){
-            ipkColumn = i;  assert( !withoutRowid );
-          }
+      j = sqlite3ColumnIndex(pTab, pColumn->a[i].zName);
+      if( j>=0 ){
+        if( aTabColMap[j]==0 ) aTabColMap[j] = i+1;
+        if( i!=j ) bIdListInOrder = 0;
+        if( j==pTab->iPKey ){
+          ipkColumn = i;  assert( !withoutRowid );
+        }
#ifndef SQLITE_OMIT_GENERATED_COLUMNS
-          if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){
-            sqlite3ErrorMsg(pParse,
-               "cannot INSERT into generated column \"%s\"",
-               pTab->aCol[j].zCnName);
-            goto insert_cleanup;
-          }
-#endif
-          break;
+        if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){
+          sqlite3ErrorMsg(pParse,
+             "cannot INSERT into
generated column \"%s\"", + pTab->aCol[j].zCnName); + goto insert_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + }else{ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){ ipkColumn = i; bIdListInOrder = 0; @@ -135222,7 +136203,7 @@ SQLITE_PRIVATE void sqlite3Insert( continue; }else if( pColumn==0 ){ /* Hidden columns that are not explicitly named in the INSERT - ** get there default value */ + ** get their default value */ sqlite3ExprCodeFactorable(pParse, sqlite3ColumnExpr(pTab, &pTab->aCol[i]), iRegStore); @@ -135947,7 +136928,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** could happen in any order, but they are grouped up front for ** convenience. ** - ** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43 + ** 2018-08-14: Ticket https://sqlite.org/src/info/908f001483982c43 ** The order of constraints used to have OE_Update as (2) and OE_Abort ** and so forth as (1). But apparently PostgreSQL checks the OE_Update ** constraint before any others, so it had to be moved. @@ -137757,6 +138738,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -138090,6 +139073,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -138611,7 +139596,9 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_stmt_explain, /* Version 3.44.0 and later */ sqlite3_get_clientdata, - sqlite3_set_clientdata + sqlite3_set_clientdata, + /* Version 3.50.0 and later */ + sqlite3_setlk_timeout }; /* True if x is the directory separator character @@ -139133,48 +140120,48 @@ static const char *const pragCName[] = { /* 13 */ "pk", /* 14 */ "hidden", /* table_info reuses 8 */ - /* 15 */ "schema", /* Used by: table_list */ - /* 16 */ "name", + /* 15 */ "name", /* Used by: function_list */ + /* 16 */ "builtin", /* 17 */ "type", - /* 18 */ "ncol", - /* 19 */ "wr", - /* 20 */ "strict", - /* 21 */ "seqno", /* Used by: index_xinfo */ - /* 22 */ "cid", - /* 23 */ "name", - /* 24 */ "desc", - /* 25 */ "coll", - /* 26 */ "key", - /* 27 */ "name", /* Used by: function_list */ - /* 28 */ "builtin", - /* 29 */ "type", - /* 30 */ "enc", - /* 31 */ "narg", - /* 32 */ "flags", - /* 33 */ "tbl", /* Used by: stats */ - /* 34 */ "idx", - /* 35 */ "wdth", - /* 36 */ "hght", - /* 37 */ "flgs", - /* 38 */ "seq", /* Used by: index_list */ - /* 39 */ "name", - /* 40 */ "unique", - /* 41 */ "origin", - /* 42 */ "partial", + /* 18 */ "enc", + /* 19 */ "narg", + /* 20 */ "flags", + /* 21 */ "schema", /* Used by: table_list */ + /* 22 */ "name", + /* 23 */ "type", + /* 24 */ "ncol", + /* 25 */ "wr", + /* 26 */ "strict", + /* 27 */ "seqno", /* Used by: index_xinfo */ + /* 28 */ "cid", + /* 29 */ "name", + /* 30 */ "desc", + /* 31 */ "coll", + /* 32 */ "key", + /* 33 */ "seq", /* Used by: index_list */ + /* 34 */ "name", + /* 35 */ "unique", + /* 36 */ "origin", + /* 37 */ "partial", + /* 38 */ "tbl", /* Used by: stats */ + /* 39 */ "idx", + /* 40 */ "wdth", + /* 41 */ "hght", + /* 42 */ "flgs", /* 43 */ "table", /* Used by: 
foreign_key_check */ /* 44 */ "rowid", /* 45 */ "parent", /* 46 */ "fkid", - /* index_info reuses 21 */ - /* 47 */ "seq", /* Used by: database_list */ - /* 48 */ "name", - /* 49 */ "file", - /* 50 */ "busy", /* Used by: wal_checkpoint */ - /* 51 */ "log", - /* 52 */ "checkpointed", - /* collation_list reuses 38 */ + /* 47 */ "busy", /* Used by: wal_checkpoint */ + /* 48 */ "log", + /* 49 */ "checkpointed", + /* 50 */ "seq", /* Used by: database_list */ + /* 51 */ "name", + /* 52 */ "file", + /* index_info reuses 27 */ /* 53 */ "database", /* Used by: lock_status */ /* 54 */ "status", + /* collation_list reuses 33 */ /* 55 */ "cache_size", /* Used by: default_cache_size */ /* module_list pragma_list reuses 9 */ /* 56 */ "timeout", /* Used by: busy_timeout */ @@ -139267,7 +140254,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "collation_list", /* ePragTyp: */ PragTyp_COLLATION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 38, 2, + /* ColNames: */ 33, 2, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) @@ -139302,7 +140289,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 47, 3, + /* ColNames: */ 50, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) @@ -139382,7 +140369,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "function_list", /* ePragTyp: */ PragTyp_FUNCTION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 27, 6, + /* ColNames: */ 15, 6, /* iArg: */ 0 }, #endif #endif @@ -139411,17 +140398,17 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "index_info", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 3, + /* ColNames: */ 27, 3, /* iArg: */ 0 }, {/* zName: */ "index_list", /* ePragTyp: */ PragTyp_INDEX_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 38, 5, + /* ColNames: */ 33, 5, /* iArg: */ 0 }, {/* zName: */ "index_xinfo", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 6, + /* ColNames: */ 27, 6, /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) @@ -139600,7 +140587,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, - /* ColNames: */ 33, 5, + /* ColNames: */ 38, 5, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -139619,7 +140606,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "table_list", /* ePragTyp: */ PragTyp_TABLE_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1, - /* ColNames: */ 15, 6, + /* ColNames: */ 21, 6, /* iArg: */ 0 }, {/* zName: */ "table_xinfo", /* ePragTyp: */ PragTyp_TABLE_INFO, @@ -139696,7 +140683,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, /* ePragFlg: */ PragFlg_NeedSchema, - /* ColNames: */ 50, 3, + /* ColNames: */ 47, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) @@ -139718,7 +140705,7 @@ static const PragmaName aPragmaName[] = { ** the following macro or to the actual analysis_limit if it is non-zero, ** in order to prevent PRAGMA optimize from running for too long. 
**
-** The value of 2000 is chosen emperically so that the worst-case run-time
+** The value of 2000 is chosen empirically so that the worst-case run-time
 ** for PRAGMA optimize does not exceed 100 milliseconds against a variety
 ** of test databases on a RaspberryPI-4 compiled using -Os and without
 ** -DSQLITE_DEBUG.  Of course, your mileage may vary.  For the purpose of
@@ -140835,7 +141822,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
      }
    }else{
      db->flags &= ~mask;
-      if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;
+      if( mask==SQLITE_DeferFKs ){
+        db->nDeferredImmCons = 0;
+        db->nDeferredCons = 0;
+      }
      if( (mask & SQLITE_WriteSchema)!=0
       && sqlite3_stricmp(zRight, "reset")==0
      ){
@@ -144004,7 +144994,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
  pNew->addrOpenEphm[0] = -1;
  pNew->addrOpenEphm[1] = -1;
  pNew->nSelectRow = 0;
-  if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*pSrc));
+  if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1);
  pNew->pSrc = pSrc;
  pNew->pWhere = pWhere;
  pNew->pGroupBy = pGroupBy;
@@ -144169,10 +145159,33 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p
 */
 SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){
  int i;
-  u8 h = sqlite3StrIHash(zCol);
-  Column *pCol;
-  for(pCol=pTab->aCol, i=0; i<pTab->nCol; pCol++, i++){
-    if( pCol->hName==h && sqlite3StrICmp(pCol->zCnName, zCol)==0 ) return i;
+  u8 h;
+  const Column *aCol;
+  int nCol;
+
+  h = sqlite3StrIHash(zCol);
+  aCol = pTab->aCol;
+  nCol = pTab->nCol;
+
+  /* See if the aHx gives us a lucky match */
+  i = pTab->aHx[h % sizeof(pTab->aHx)];
+  assert( i<nCol );
+  if( aCol[i].hName==h
+   && sqlite3StrICmp(aCol[i].zCnName, zCol)==0
+  ){
+    return i;
+  }
+
+  /* No lucky match from the hash table.  Do a full search. */
+  i = 0;
+  while( 1 /*exit-by-break*/ ){
+    if( aCol[i].hName==h
+     && sqlite3StrICmp(aCol[i].zCnName, zCol)==0
+    ){
+      return i;
+    }
+    i++;
+    if( i>=nCol ) break;
  }
  return -1;
 }
@@ -144423,7 +145436,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
    }
    pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iLeftCol);
    sqlite3SrcItemColumnUsed(&pSrc->a[iLeft], iLeftCol);
-    if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+    if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 && pParse->nErr==0 ){
      /* This branch runs if the query contains one or more RIGHT or FULL
      ** JOINs.  If only a single table on the left side of this join
      ** contains the zName column, then this branch is a no-op.
@@ -144439,6 +145452,8 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
      */
      ExprList *pFuncArgs = 0;   /* Arguments to the coalesce() */
      static const Token tkCoalesce = { "coalesce", 8 };
+      assert( pE1!=0 );
+      ExprSetProperty(pE1, EP_CanBeNull);
      while( tableAndColumnIndex(pSrc, iLeft+1, i, zName, &iLeft, &iLeftCol,
                                 pRight->fg.isSynthUsing)!=0 ){
        if( pSrc->a[iLeft].fg.isUsing==0
@@ -144455,7 +145470,13 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){
      if( pFuncArgs ){
        pFuncArgs = sqlite3ExprListAppend(pParse, pFuncArgs, pE1);
        pE1 = sqlite3ExprFunction(pParse, pFuncArgs, &tkCoalesce, 0);
+        if( pE1 ){
+          pE1->affExpr = SQLITE_AFF_DEFER;
+        }
      }
+    }else if( (pSrc->a[i+1].fg.jointype & JT_LEFT)!=0 && pParse->nErr==0 ){
+      assert( pE1!=0 );
+      ExprSetProperty(pE1, EP_CanBeNull);
    }
    pE2 = sqlite3CreateColumnExpr(db, pSrc, i+1, iRightCol);
    sqlite3SrcItemColumnUsed(pRight, iRightCol);
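The rewritten sqlite3ColumnIndex() above pairs with the Table.aHx[] seeding that this diff adds in sqlite3AddColumn(): each column's name hash marks a slot in a small per-table array, so most lookups succeed on the first probe, with the old linear scan kept as the fallback. A self-contained sketch of that "lucky match first" pattern; the structure, hash, and sizes here are simplified stand-ins, not the amalgamation's:

    #include <stdint.h>
    #include <string.h>

    #define NCOL 4

    typedef struct Col { const char *zName; uint8_t hName; } Col;
    typedef struct Tab {
      Col aCol[NCOL];
      int nCol;
      uint8_t aHx[16];   /* name-hash -> index of a column with that hash */
    } Tab;

    static uint8_t hash(const char *z){   /* stand-in for sqlite3StrIHash */
      uint8_t h = 0;
      while( *z ) h = (uint8_t)(h*31 + (uint8_t)*z++);
      return h;
    }

    static void addCol(Tab *t, const char *zName){
      Col *c = &t->aCol[t->nCol];
      c->zName = zName;
      c->hName = hash(zName);
      t->aHx[c->hName % sizeof(t->aHx)] = (uint8_t)t->nCol;  /* seed hint */
      t->nCol++;
    }

    static int colIndex(Tab *t, const char *zCol){
      uint8_t h = hash(zCol);
      int i = t->aHx[h % sizeof(t->aHx)];          /* lucky first probe */
      if( i < t->nCol && t->aCol[i].hName==h
       && strcmp(t->aCol[i].zName, zCol)==0 ) return i;
      for(i=0; i < t->nCol; i++){                  /* fallback: full scan */
        if( t->aCol[i].hName==h && strcmp(t->aCol[i].zName, zCol)==0 ) return i;
      }
      return -1;
    }

    int main(void){
      Tab t;
      memset(&t, 0, sizeof(t));
      addCol(&t, "id"); addCol(&t, "name"); addCol(&t, "email");
      return colIndex(&t, "name")==1 ? 0 : 1;
    }

Because the hint only narrows where the search starts, a stale or colliding entry costs nothing more than the scan the old code always performed.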
*/ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ - int nExtra = (N+X)*(sizeof(CollSeq*)+1) - sizeof(CollSeq*); - KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra); + int nExtra = (N+X)*(sizeof(CollSeq*)+1); + KeyInfo *p = sqlite3DbMallocRawNN(db, SZ_KEYINFO(0) + nExtra); if( p ){ p->aSortFlags = (u8*)&p->aColl[N+X]; p->nKeyField = (u16)N; @@ -145373,7 +146394,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ p->enc = ENC(db); p->db = db; p->nRef = 1; - memset(&p[1], 0, nExtra); + memset(p->aColl, 0, nExtra); }else{ return (KeyInfo*)sqlite3OomFault(db); } @@ -147074,6 +148095,7 @@ static int multiSelect( multi_select_end: pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; + pDest->iSDParm2 = dest.iSDParm2; if( pDelete ){ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } @@ -148062,9 +149084,9 @@ static int compoundHasDifferentAffinities(Select *p){ ** from 2015-02-09.) ** ** (3) If the subquery is the right operand of a LEFT JOIN then -** (3a) the subquery may not be a join and -** (3b) the FROM clause of the subquery may not contain a virtual -** table and +** (3a) the subquery may not be a join +** (**) Was (3b): "the FROM clause of the subquery may not contain +** a virtual table" ** (**) Was: "The outer query may not have a GROUP BY." This case ** is now managed correctly ** (3d) the outer query may not be DISTINCT. @@ -148280,7 +149302,7 @@ static int flattenSubquery( */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || IsVirtual(pSubSrc->a[0].pSTab) /* (3b) */ + /**** || IsVirtual(pSubSrc->a[0].pSTab) (3b)-omitted */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ ){ @@ -148684,7 +149706,8 @@ static void constInsert( return; /* Already present. Return without doing anything. */ } } - if( sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONEbHasAffBlob = 1; } @@ -148759,7 +149782,8 @@ static int propagateConstantExprRewriteOne( if( pColumn==pExpr ) continue; if( pColumn->iTable!=pExpr->iTable ) continue; if( pColumn->iColumn!=pExpr->iColumn ) continue; - if( bIgnoreAffBlob && sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONEpWinDefn = 0; #endif - p->selFlags &= ~SF_Compound; + p->selFlags &= ~(u32)SF_Compound; assert( (p->selFlags & SF_Converted)==0 ); p->selFlags |= SF_Converted; assert( pNew->pPrior!=0 ); @@ -149889,7 +150913,7 @@ static int selectExpander(Walker *pWalker, Select *p){ pEList = p->pEList; if( pParse->pWith && (p->selFlags & SF_View) ){ if( p->pWith==0 ){ - p->pWith = (With*)sqlite3DbMallocZero(db, sizeof(With)); + p->pWith = (With*)sqlite3DbMallocZero(db, SZ_WITH(1) ); if( p->pWith==0 ){ return WRC_Abort; } @@ -151028,6 +152052,7 @@ static void agginfoFree(sqlite3 *db, void *pArg){ ** * There is no WHERE or GROUP BY or HAVING clauses on the subqueries ** * The outer query is a simple count(*) with no WHERE clause or other ** extraneous syntax. +** * None of the subqueries are DISTINCT (forumpost/a860f5fb2e 2025-03-10) ** ** Return TRUE if the optimization is undertaken. 
 */
@@ -151060,7 +152085,11 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
    if( pSub->op!=TK_ALL && pSub->pPrior ) return 0;  /* Must be UNION ALL */
    if( pSub->pWhere ) return 0;                      /* No WHERE clause */
    if( pSub->pLimit ) return 0;                      /* No LIMIT clause */
-    if( pSub->selFlags & SF_Aggregate ) return 0;     /* Not an aggregate */
+    if( pSub->selFlags & (SF_Aggregate|SF_Distinct) ){
+      testcase( pSub->selFlags & SF_Aggregate );
+      testcase( pSub->selFlags & SF_Distinct );
+      return 0;   /* Not an aggregate nor DISTINCT */
+    }
    assert( pSub->pHaving==0 );  /* Due to the previous */
    pSub = pSub->pPrior;         /* Repeat over compound */
  }while( pSub );
@@ -151072,14 +152101,14 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
  pExpr = 0;
  pSub = sqlite3SubqueryDetach(db, pFrom);
  sqlite3SrcListDelete(db, p->pSrc);
-  p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc));
+  p->pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1);
  while( pSub ){
    Expr *pTerm;
    pPrior = pSub->pPrior;
    pSub->pPrior = 0;
    pSub->pNext = 0;
    pSub->selFlags |= SF_Aggregate;
-    pSub->selFlags &= ~SF_Compound;
+    pSub->selFlags &= ~(u32)SF_Compound;
    pSub->nSelectRow = 0;
    sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList);
    pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount;
@@ -151094,7 +152123,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
    pSub = pPrior;
  }
  p->pEList->a[0].pExpr = pExpr;
-  p->selFlags &= ~SF_Aggregate;
+  p->selFlags &= ~(u32)SF_Aggregate;

#if TREETRACE_ENABLED
  if( sqlite3TreeTrace & 0x200 ){
@@ -151301,7 +152330,7 @@ SQLITE_PRIVATE int sqlite3Select(
      testcase( pParse->earlyCleanup );
      p->pOrderBy = 0;
    }
-    p->selFlags &= ~SF_Distinct;
+    p->selFlags &= ~(u32)SF_Distinct;
    p->selFlags |= SF_NoopOrderBy;
  }
  sqlite3SelectPrep(pParse, p, 0);
@@ -151340,7 +152369,7 @@ SQLITE_PRIVATE int sqlite3Select(
    ** and leaving this flag set can cause errors if a compound sub-query
    ** in p->pSrc is flattened into this query and this function called
    ** again as part of compound SELECT processing.  */
-    p->selFlags &= ~SF_UFSrcCheck;
+    p->selFlags &= ~(u32)SF_UFSrcCheck;
  }

  if( pDest->eDest==SRT_Output ){
@@ -151829,7 +152858,7 @@ SQLITE_PRIVATE int sqlite3Select(
   && p->pWin==0
#endif
  ){
-    p->selFlags &= ~SF_Distinct;
+    p->selFlags &= ~(u32)SF_Distinct;
    pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0);
    if( pGroupBy ){
      for(i=0; i<pGroupBy->nExpr; i++){
@@ -151938,6 +152967,12 @@ SQLITE_PRIVATE int sqlite3Select(
    if( pWInfo==0 ) goto select_end;
    if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){
      p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo);
+      if( pDest->eDest<=SRT_DistQueue && pDest->eDest>=SRT_DistFifo ){
+        /* TUNING: For a UNION CTE, because UNION is implies DISTINCT,
+        ** reduce the estimated output row count by 8 (LogEst 30).
+        ** Search for tag-20250414a to see other cases */
+        p->nSelectRow -= 30;
+      }
    }
    if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){
      sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo);
@@ -152168,6 +153203,7 @@ SQLITE_PRIVATE int sqlite3Select(
      sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag);
      VdbeComment((v, "clear abort flag"));
      sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1);
+      sqlite3ExprNullRegisterRange(pParse, iAMem, pGroupBy->nExpr);

      /* Begin a loop that will extract all source rows in GROUP BY order.
      ** This might involve two separate loops with an OP_Sort in between, or
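The TUNING hunk above (p->nSelectRow -= 30) relies on SQLite's LogEst row estimates, which store a quantity n as roughly 10*log2(n); subtracting 30 therefore divides the estimate by 2^3 = 8, matching the comment. A quick standalone check of that arithmetic (illustrative only; compile with -lm):

    #include <assert.h>
    #include <math.h>
    #include <stdio.h>

    int main(void){
      double nRow = 1000.0;
      double logEst = 10.0 * log2(nRow);   /* LogEst encoding of 1000 rows */
      double reduced = logEst - 30.0;      /* the hunk's "-= 30" */
      double back = pow(2.0, reduced/10.0);
      assert( fabs(back - nRow/8.0) < 1e-6 );   /* exactly an 8x reduction */
      printf("%.0f rows -> %.0f rows\n", nRow, back);
      return 0;
    }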
@@ -152311,6 +153347,10 @@ SQLITE_PRIVATE int sqlite3Select(
          if( iOrderByCol ){
            Expr *pX = p->pEList->a[iOrderByCol-1].pExpr;
            Expr *pBase = sqlite3ExprSkipCollateAndLikely(pX);
+            while( ALWAYS(pBase!=0) && pBase->op==TK_IF_NULL_ROW ){
+              pX = pBase->pLeft;
+              pBase = sqlite3ExprSkipCollateAndLikely(pX);
+            }
            if( ALWAYS(pBase!=0)
             && pBase->op!=TK_AGG_COLUMN
             && pBase->op!=TK_REGISTER
@@ -152894,7 +153934,8 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){
    assert( pParse->db->pVtabCtx==0 );
#endif
    assert( pParse->bReturning );
-    assert( &(pParse->u1.pReturning->retTrig) == pTrig );
+    assert( !pParse->isCreate );
+    assert( &(pParse->u1.d.pReturning->retTrig) == pTrig );
    pTrig->table = pTab->zName;
    pTrig->pTabSchema = pTab->pSchema;
    pTrig->pNext = pList;
@@ -153862,7 +154903,8 @@ static void codeReturningTrigger(
  ExprList *pNew;
  Returning *pReturning;
  Select sSelect;
-  SrcList sFrom;
+  SrcList *pFrom;
+  u8 fromSpace[SZ_SRCLIST_1];

  assert( v!=0 );
  if( !pParse->bReturning ){
    return;
  }
  assert( db->pParse==pParse );
-  pReturning = pParse->u1.pReturning;
+  assert( !pParse->isCreate );
+  pReturning = pParse->u1.d.pReturning;
  if( pTrigger != &(pReturning->retTrig) ){
    /* This RETURNING trigger is for a different statement */
    return;
  }
  memset(&sSelect, 0, sizeof(sSelect));
-  memset(&sFrom, 0, sizeof(sFrom));
+  pFrom = (SrcList*)fromSpace;
+  memset(pFrom, 0, SZ_SRCLIST_1);
  sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0);
-  sSelect.pSrc = &sFrom;
-  sFrom.nSrc = 1;
-  sFrom.a[0].pSTab = pTab;
-  sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */
-  sFrom.a[0].iCursor = -1;
+  sSelect.pSrc = pFrom;
+  pFrom->nSrc = 1;
+  pFrom->a[0].pSTab = pTab;
+  pFrom->a[0].zName = pTab->zName; /* tag-20240424-1 */
+  pFrom->a[0].iCursor = -1;
  sqlite3SelectPrep(pParse, &sSelect, 0);
  if( pParse->nErr==0 ){
    assert( db->mallocFailed==0 );
@@ -154101,6 +155145,8 @@ static TriggerPrg *codeRowTrigger(
  sSubParse.eTriggerOp = pTrigger->op;
  sSubParse.nQueryLoop = pParse->nQueryLoop;
  sSubParse.prepFlags = pParse->prepFlags;
+  sSubParse.oldmask = 0;
+  sSubParse.newmask = 0;

  v = sqlite3GetVdbe(&sSubParse);
  if( v ){
@@ -154855,38 +155901,32 @@ SQLITE_PRIVATE void sqlite3Update(
  */
  chngRowid = chngPk = 0;
  for(i=0; i<pChanges->nExpr; i++){
-    u8 hCol = sqlite3StrIHash(pChanges->a[i].zEName);
    /* If this is an UPDATE with a FROM clause, do not resolve expressions
    ** here. The call to sqlite3Select() below will do that.
*/ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } - for(j=0; jnCol; j++){ - if( pTab->aCol[j].hName==hCol - && sqlite3StrICmp(pTab->aCol[j].zCnName, pChanges->a[i].zEName)==0 - ){ - if( j==pTab->iPKey ){ - chngRowid = 1; - pRowidExpr = pChanges->a[i].pExpr; - iRowidExpr = i; - }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ - chngPk = 1; - } + j = sqlite3ColumnIndex(pTab, pChanges->a[i].zEName); + if( j>=0 ){ + if( j==pTab->iPKey ){ + chngRowid = 1; + pRowidExpr = pChanges->a[i].pExpr; + iRowidExpr = i; + }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ + chngPk = 1; + } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ - testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); - testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); - sqlite3ErrorMsg(pParse, - "cannot UPDATE generated column \"%s\"", - pTab->aCol[j].zCnName); - goto update_cleanup; - } -#endif - aXRef[j] = i; - break; + else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ + testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); + testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); + sqlite3ErrorMsg(pParse, + "cannot UPDATE generated column \"%s\"", + pTab->aCol[j].zCnName); + goto update_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + aXRef[j] = i; + }else{ if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zEName) ){ j = -1; chngRowid = 1; @@ -156209,7 +157249,7 @@ SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm, Expr *pInto){ #else /* When SQLITE_BUG_COMPATIBLE_20160819 is defined, unrecognized arguments ** to VACUUM are silently ignored. This is a back-out of a bug fix that - ** occurred on 2016-08-19 (https://www.sqlite.org/src/info/083f9e6270). + ** occurred on 2016-08-19 (https://sqlite.org/src/info/083f9e6270). ** The buggy behavior is required for binary compatibility with some ** legacy applications. */ iDb = sqlite3FindDb(pParse->db, pNm); @@ -156288,7 +157328,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_mTrace = db->mTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks; + db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_Comments; db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum; db->flags &= ~(u64)(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_Defensive | SQLITE_CountRows); @@ -156993,11 +158033,12 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ ** schema table. We just need to update that slot with all ** the information we've collected. ** - ** The VM register number pParse->regRowid holds the rowid of an + ** The VM register number pParse->u1.cr.regRowid holds the rowid of an ** entry in the sqlite_schema table that was created for this vtab ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( pParse->isCreate ); sqlite3NestedParse(pParse, "UPDATE %Q." 
LEGACY_SCHEMA_TABLE " " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " @@ -157006,7 +158047,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ pTab->zName, pTab->zName, zStmt, - pParse->regRowid + pParse->u1.cr.regRowid ); v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); @@ -158416,9 +159457,14 @@ struct WhereInfo { Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ WhereClause sWC; /* Decomposition of the WHERE clause */ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ - WhereLevel a[1]; /* Information about each nest loop in WHERE */ + WhereLevel a[FLEXARRAY]; /* Information about each nest loop in WHERE */ }; +/* +** The size (in bytes) of a WhereInfo object that holds N WhereLevels. +*/ +#define SZ_WHEREINFO(N) ROUND8(offsetof(WhereInfo,a)+(N)*sizeof(WhereLevel)) + /* ** Private interfaces - callable only by other where.c routines. ** @@ -159098,7 +160144,7 @@ static void adjustOrderByCol(ExprList *pOrderBy, ExprList *pEList){ /* ** pX is an expression of the form: (vector) IN (SELECT ...) ** In other words, it is a vector IN operator with a SELECT clause on the -** LHS. But not all terms in the vector are indexable and the terms might +** RHS. But not all terms in the vector are indexable and the terms might ** not be in the correct order for indexing. ** ** This routine makes a copy of the input pX expression and then adjusts @@ -159154,7 +160200,9 @@ static Expr *removeUnindexableInClauseTerms( int iField; assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); iField = pLoop->aLTerm[i]->u.x.iField - 1; - if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ + if( NEVER(pOrigRhs->a[iField].pExpr==0) ){ + continue; /* Duplicate PK column */ + } pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); pOrigRhs->a[iField].pExpr = 0; if( pRhs ) pRhs->a[pRhs->nExpr-1].u.x.iOrderByCol = iField+1; @@ -159251,7 +160299,7 @@ static SQLITE_NOINLINE void codeINTerm( return; } } - for(i=iEq;inLTerm; i++){ + for(i=iEq; inLTerm; i++){ assert( pLoop->aLTerm[i]!=0 ); if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; } @@ -159260,22 +160308,13 @@ static SQLITE_NOINLINE void codeINTerm( if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); }else{ - Expr *pExpr = pTerm->pExpr; - if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ - sqlite3 *db = pParse->db; - pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); - if( !db->mallocFailed ){ - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); - pExpr->iTable = iTab; - } - sqlite3ExprDelete(db, pX); - }else{ - int n = sqlite3ExprVectorSize(pX->pLeft); - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); + sqlite3 *db = pParse->db; + Expr *pXMod = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); + if( !db->mallocFailed ){ + aiMap = (int*)sqlite3DbMallocZero(db, sizeof(int)*nEq); + eType = sqlite3FindInIndex(pParse, pXMod, IN_INDEX_LOOP, 0, aiMap, &iTab); } - pX = pExpr; + sqlite3ExprDelete(db, pXMod); } if( eType==IN_INDEX_INDEX_DESC ){ @@ -159305,7 +160344,7 @@ static SQLITE_NOINLINE void codeINTerm( if( pIn ){ int iMap = 0; /* Index in aiMap[] */ pIn += i; - for(i=iEq;inLTerm; i++){ + for(i=iEq; inLTerm; i++){ if( pLoop->aLTerm[i]->pExpr==pX ){ int iOut = iTarget + i - iEq; 
if( eType==IN_INDEX_ROWID ){ @@ -160164,6 +161203,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); + /* The instruction immediately prior to OP_VFilter must be an OP_Integer + ** that sets the "argc" value for xVFilter. This is necessary for + ** resolveP2() to work correctly. See tag-20250207a. */ sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, pLoop->u.vtab.idxStr, pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC); @@ -160754,12 +161796,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iLeftJoin==0 ){ /* If a partial index is driving the loop, try to eliminate WHERE clause ** terms from the query that must be true due to the WHERE clause of - ** the partial index. + ** the partial index. This optimization does not work on an outer join, + ** as shown by: ** - ** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work - ** for a LEFT JOIN. + ** 2019-11-02 ticket 623eff57e76d45f6 (LEFT JOIN) + ** 2025-05-29 forum post 7dee41d32506c4ae (RIGHT JOIN) */ - if( pIdx->pPartIdxWhere ){ + if( pIdx->pPartIdxWhere && pLevel->pRJ==0 ){ whereApplyPartialIndexConstraints(pIdx->pPartIdxWhere, iCur, pWC); } }else{ @@ -160866,8 +161909,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int nNotReady; /* The number of notReady tables */ SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3DbMallocRawNN(db, - sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); + pOrTab = sqlite3DbMallocRawNN(db, SZ_SRCLIST(nNotReady+1)); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); pOrTab->nSrc = pOrTab->nAlloc; @@ -160918,7 +161960,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** ** This optimization also only applies if the (x1 OR x2 OR ...) term ** is not contained in the ON clause of a LEFT JOIN. - ** See ticket http://www.sqlite.org/src/info/f2369304e4 + ** See ticket http://sqlite.org/src/info/f2369304e4 ** ** 2022-02-04: Do not push down slices of a row-value comparison. ** In other words, "w" or "y" may not be a slice of a vector. Otherwise, @@ -161410,7 +162452,8 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( WhereInfo *pSubWInfo; WhereLoop *pLoop = pLevel->pWLoop; SrcItem *pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; - SrcList sFrom; + SrcList *pFrom; + u8 fromSpace[SZ_SRCLIST_1]; Bitmask mAll = 0; int k; @@ -161454,13 +162497,14 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( sqlite3ExprDup(pParse->db, pTerm->pExpr, 0)); } } - sFrom.nSrc = 1; - sFrom.nAlloc = 1; - memcpy(&sFrom.a[0], pTabItem, sizeof(SrcItem)); - sFrom.a[0].fg.jointype = 0; + pFrom = (SrcList*)fromSpace; + pFrom->nSrc = 1; + pFrom->nAlloc = 1; + memcpy(&pFrom->a[0], pTabItem, sizeof(SrcItem)); + pFrom->a[0].fg.jointype = 0; assert( pParse->withinRJSubrtn < 100 ); pParse->withinRJSubrtn++; - pSubWInfo = sqlite3WhereBegin(pParse, &sFrom, pSubWhere, 0, 0, 0, + pSubWInfo = sqlite3WhereBegin(pParse, pFrom, pSubWhere, 0, 0, 0, WHERE_RIGHT_JOIN, 0); if( pSubWInfo ){ int iCur = pLevel->iTabCur; @@ -162431,30 +163475,42 @@ static void exprAnalyzeOrTerm( ** 1. The SQLITE_Transitive optimization must be enabled ** 2. Must be either an == or an IS operator ** 3. Not originating in the ON clause of an OUTER JOIN -** 4. The affinities of A and B must be compatible -** 5a. Both operands use the same collating sequence OR -** 5b. 
The overall collating sequence is BINARY +** 4. The operator is not IS or else the query does not contain RIGHT JOIN +** 5. The affinities of A and B must be compatible +** 6a. Both operands use the same collating sequence OR +** 6b. The overall collating sequence is BINARY ** If this routine returns TRUE, that means that the RHS can be substituted ** for the LHS anyplace else in the WHERE clause where the LHS column occurs. ** This is an optimization. No harm comes from returning 0. But if 1 is ** returned when it should not be, then incorrect answers might result. */ -static int termIsEquivalence(Parse *pParse, Expr *pExpr){ +static int termIsEquivalence(Parse *pParse, Expr *pExpr, SrcList *pSrc){ char aff1, aff2; CollSeq *pColl; - if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; - if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; - if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; + if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; /* (1) */ + if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; /* (2) */ + if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* (3) */ + assert( pSrc!=0 ); + if( pExpr->op==TK_IS + && pSrc->nSrc + && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 + ){ + return 0; /* (4) */ + } aff1 = sqlite3ExprAffinity(pExpr->pLeft); aff2 = sqlite3ExprAffinity(pExpr->pRight); if( aff1!=aff2 && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2)) ){ - return 0; + return 0; /* (5) */ } pColl = sqlite3ExprCompareCollSeq(pParse, pExpr); - if( sqlite3IsBinary(pColl) ) return 1; - return sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight); + if( !sqlite3IsBinary(pColl) + && !sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight) + ){ + return 0; /* (6) */ + } + return 1; } /* @@ -162719,8 +163775,8 @@ static void exprAnalyze( if( op==TK_IS ) pNew->wtFlags |= TERM_IS; pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; - - if( termIsEquivalence(pParse, pDup) ){ + assert( pWInfo->pTabList!=0 ); + if( termIsEquivalence(pParse, pDup, pWInfo->pTabList) ){ pTerm->eOperator |= WO_EQUIV; eExtraOp = WO_EQUIV; } @@ -163448,11 +164504,16 @@ struct HiddenIndexInfo { int eDistinct; /* Value to return from sqlite3_vtab_distinct() */ u32 mIn; /* Mask of terms that are IN (...) */ u32 mHandleIn; /* Terms that vtab will handle as IN (...) */ - sqlite3_value *aRhs[1]; /* RHS values for constraints. MUST BE LAST - ** because extra space is allocated to hold up - ** to nTerm such values */ + sqlite3_value *aRhs[FLEXARRAY]; /* RHS values for constraints. 
MUST BE LAST + ** Extra space is allocated to hold up + ** to nTerm such values */ }; +/* Size (in bytes) of a HiddenIndexInfo object sufficient to hold as +** many as N constraints */ +#define SZ_HIDDENINDEXINFO(N) \ + (offsetof(HiddenIndexInfo,aRhs) + (N)*sizeof(sqlite3_value*)) + /* Forward declaration of methods */ static int whereLoopResize(sqlite3*, WhereLoop*, int); @@ -164517,6 +165578,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex( } /* Construct the Index object to describe this index */ + assert( nKeyCol <= pTable->nCol + MAX(0, pTable->nCol - BMS + 1) ); + /* ^-- This guarantees that the number of index columns will fit in the u16 */ pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+HasRowid(pTable), 0, &zNotUsed); if( pIdx==0 ) goto end_auto_index_create; @@ -164928,8 +165991,8 @@ static sqlite3_index_info *allocateIndexInfo( */ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm - + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden) - + sizeof(sqlite3_value*)*nTerm ); + + sizeof(*pIdxOrderBy)*nOrderBy + + SZ_HIDDENINDEXINFO(nTerm) ); if( pIdxInfo==0 ){ sqlite3ErrorMsg(pParse, "out of memory"); return 0; } @@ -166565,11 +167628,8 @@ static int whereLoopAddBtreeIndex( assert( pNew->u.btree.nBtm==0 ); opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } - if( pProbe->bUnordered || pProbe->bLowQual ){ - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); - if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){ - opMask &= ~(WO_EQ|WO_IN|WO_IS); - } + if( pProbe->bUnordered ){ + opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); } assert( pNew->u.btree.nEq<pProbe->nColumn ); @@ -166642,6 +167702,7 @@ static int whereLoopAddBtreeIndex( if( ExprUseXSelect(pExpr) ){ /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ int i; + int bRedundant = 0; nIn = 46; assert( 46==sqlite3LogEst(25) ); /* The expression may actually be of the form (x, y) IN (SELECT...). @@ -166650,7 +167711,20 @@ static int whereLoopAddBtreeIndex( ** for each such term. The following loop checks that pTerm is the ** first such term in use, and sets nIn back to 0 if it is not. */ for(i=0; i<pNew->nLTerm-1; i++){ - if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0; + if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ){ + nIn = 0; + if( pNew->aLTerm[i]->u.x.iField == pTerm->u.x.iField ){ + /* Detect when two or more columns of an index match the same + ** column of a vector IN operator, and avoid adding the column + ** to the WhereLoop more than once.
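All of the FLEXARRAY conversions in this region follow one recipe: the old one-element trailing array becomes a C99 flexible array member, and every allocation switches to an offsetof()-based SZ_*() macro. A hypothetical reduced example of the idiom; offsetof() counts only the fixed header, whereas the old sizeof()-based sizing silently included one phantom element plus any tail padding:

#include <stdlib.h>
#include <stddef.h>

typedef struct Hidden { int nTerm; void *aRhs[]; } Hidden;  /* cf. aRhs[FLEXARRAY] */

#define SZ_HIDDEN(N) (offsetof(Hidden,aRhs) + (N)*sizeof(void*))

static Hidden *hiddenNew(int nTerm){
  Hidden *p = calloc(1, SZ_HIDDEN(nTerm));  /* header + exactly N slots */
  if( p ) p->nTerm = nTerm;
  return p;                 /* p->aRhs[0..nTerm-1] are usable slots */
}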
See tag-20250707-01 + ** in test/rowvalue.test */ + bRedundant = 1; + } + } + } + if( bRedundant ){ + pNew->nLTerm--; + continue; } }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){ /* "x IN (value, value, ...)" */ @@ -166882,7 +167956,7 @@ static int whereLoopAddBtreeIndex( if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 && pNew->u.btree.nEqnColumn && (pNew->u.btree.nEqnKeyCol || - pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) + pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) ){ if( pNew->u.btree.nEq>3 ){ sqlite3ProgressCheck(pParse); @@ -167011,6 +168085,7 @@ static int whereUsablePartialIndex( if( (!ExprHasProperty(pExpr, EP_OuterON) || pExpr->w.iJoin==iTab) && ((jointype & JT_OUTER)==0 || ExprHasProperty(pExpr, EP_OuterON)) && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab) + && !sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, -1) && (pTerm->wtFlags & TERM_VNULL)==0 ){ return 1; @@ -167424,6 +168499,7 @@ static int whereLoopAddBtree( pNew->u.btree.nEq = 0; pNew->u.btree.nBtm = 0; pNew->u.btree.nTop = 0; + pNew->u.btree.nDistinctCol = 0; pNew->nSkip = 0; pNew->nLTerm = 0; pNew->iSortIdx = 0; @@ -167506,7 +168582,7 @@ static int whereLoopAddBtree( && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700)) ){ WHERETRACE(0x200, - ("-> %s a covering index according to bitmasks\n", + ("-> %s is a covering index according to bitmasks\n", pProbe->zName, m==0 ? "is" : "is not")); pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; } @@ -168492,8 +169568,6 @@ static i8 wherePathSatisfiesOrderBy( obSat = obDone; } break; - }else if( wctrlFlags & WHERE_DISTINCTBY ){ - pLoop->u.btree.nDistinctCol = 0; } iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor; @@ -170123,10 +171197,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** field (type Bitmask) it must be aligned on an 8-byte boundary on ** some architectures. Hence the ROUND8() below. */ - nByteWInfo = ROUND8P(sizeof(WhereInfo)); - if( nTabList>1 ){ - nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel)); - } + nByteWInfo = SZ_WHEREINFO(nTabList); pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop)); if( db->mallocFailed ){ sqlite3DbFree(db, pWInfo); @@ -170343,7 +171414,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } /* TUNING: Assume that a DISTINCT clause on a subquery reduces - ** the output size by a factor of 8 (LogEst -30). + ** the output size by a factor of 8 (LogEst -30). Search for + ** tag-20250414a to see other cases. */ if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){ WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n", @@ -172078,7 +173150,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ p->pWhere = 0; p->pGroupBy = 0; p->pHaving = 0; - p->selFlags &= ~SF_Aggregate; + p->selFlags &= ~(u32)SF_Aggregate; p->selFlags |= SF_WinRewrite; /* Create the ORDER BY clause for the sub-select. This is the concatenation @@ -174218,6 +175290,11 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( /* #include "sqliteInt.h" */ +/* +** Verify that the pParse->isCreate field is set +*/ +#define ASSERT_IS_CREATE assert(pParse->isCreate) + /* ** Disable all error recovery processing in the parser push-down ** automaton. 
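The ASSERT_IS_CREATE macro defined just above pairs with the Parse.u1.cr/u1.d union split seen throughout these hunks: a debug-only isCreate flag records which union arm is live, and accesses assert it. A reduced sketch of the pattern with hypothetical members (the macro is parameterized here for clarity; the real one implicitly uses pParse):

#include <assert.h>

typedef struct Ctx {
#ifdef MYDEBUG
  int isCreate;                      /* which union arm is live */
#endif
  union {
    struct { int regRowid; } cr;     /* valid only while isCreate */
    struct { void *pReturning; } d;  /* valid only while !isCreate */
  } u1;
} Ctx;

#ifdef MYDEBUG
# define ASSERT_IS_CREATE(p) assert((p)->isCreate)
#else
# define ASSERT_IS_CREATE(p)
#endif

static int createRowidReg(Ctx *p){
  ASSERT_IS_CREATE(p);   /* reading the wrong arm trips in debug builds */
  return p->u1.cr.regRowid;
}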
@@ -174281,6 +175358,10 @@ static void parserSyntaxError(Parse *pParse, Token *p){ static void disableLookaside(Parse *pParse){ sqlite3 *db = pParse->db; pParse->disableLookaside++; +#ifdef SQLITE_DEBUG + pParse->isCreate = 1; +#endif + memset(&pParse->u1.cr, 0, sizeof(pParse->u1.cr)); DisableLookaside; } @@ -177917,7 +178998,9 @@ static YYACTIONTYPE yy_reduce( } break; case 14: /* createkw ::= CREATE */ -{disableLookaside(pParse);} +{ + disableLookaside(pParse); +} break; case 15: /* ifnotexists ::= */ case 18: /* temp ::= */ yytestcase(yyruleno==18); @@ -178009,7 +179092,7 @@ static YYACTIONTYPE yy_reduce( break; case 32: /* ccons ::= CONSTRAINT nm */ case 67: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==67); -{pParse->constraintName = yymsp[0].minor.yy0;} +{ASSERT_IS_CREATE; pParse->u1.cr.constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ {sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} @@ -178119,7 +179202,7 @@ static YYACTIONTYPE yy_reduce( {yymsp[-1].minor.yy502 = 0;} break; case 66: /* tconscomma ::= COMMA */ -{pParse->constraintName.n = 0;} +{ASSERT_IS_CREATE; pParse->u1.cr.constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ {sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy402,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);} @@ -178206,8 +179289,8 @@ static YYACTIONTYPE yy_reduce( if( pRhs ){ pRhs->op = (u8)yymsp[-1].minor.yy502; pRhs->pPrior = pLhs; - if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; - pRhs->selFlags &= ~SF_MultiValue; + if( ALWAYS(pLhs) ) pLhs->selFlags &= ~(u32)SF_MultiValue; + pRhs->selFlags &= ~(u32)SF_MultiValue; if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); @@ -178847,12 +179930,21 @@ static YYACTIONTYPE yy_reduce( ** expr1 IN () ** expr1 NOT IN () ** - ** simplify to constants 0 (false) and 1 (true), respectively, - ** regardless of the value of expr1. + ** simplify to constants 0 (false) and 1 (true), respectively. + ** + ** Except, do not apply this optimization if expr1 contains a function + ** because that function might be an aggregate (we don't know yet whether + ** it is or not) and if it is an aggregate, that could change the meaning + ** of the whole query. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); - yymsp[-4].minor.yy590 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); - if( yymsp[-4].minor.yy590 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy590); + Expr *pB = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); + if( pB ) sqlite3ExprIdToTrueFalse(pB); + if( !ExprHasProperty(yymsp[-4].minor.yy590, EP_HasFunc) ){ + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); + yymsp[-4].minor.yy590 = pB; + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, yymsp[-3].minor.yy502 ? 
TK_OR : TK_AND, pB, yymsp[-4].minor.yy590); + } }else{ Expr *pRHS = yymsp[-1].minor.yy402->a[0].pExpr; if( yymsp[-1].minor.yy402->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy590->op!=TK_VECTOR ){ @@ -179012,6 +180104,10 @@ static YYACTIONTYPE yy_reduce( { sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy28.a, yymsp[-4].minor.yy28.b, yymsp[-2].minor.yy563, yymsp[0].minor.yy590, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ +#ifdef SQLITE_DEBUG + assert( pParse->isCreate ); /* Set by createkw reduce action */ + pParse->isCreate = 0; /* But, should not be set for CREATE TRIGGER */ +#endif } break; case 262: /* trigger_time ::= BEFORE|AFTER */ @@ -180454,7 +181550,7 @@ static int getToken(const unsigned char **pz){ int t; /* Token type to return */ do { z += sqlite3GetToken(z, &t); - }while( t==TK_SPACE ); + }while( t==TK_SPACE || t==TK_COMMENT ); if( t==TK_ID || t==TK_STRING || t==TK_JOIN_KW @@ -180947,7 +182043,11 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ - }else if( tokenType==TK_COMMENT && (db->flags & SQLITE_Comments)!=0 ){ + }else if( tokenType==TK_COMMENT + && (db->init.busy || (db->flags & SQLITE_Comments)!=0) + ){ + /* Ignore SQL comments if either (1) we are reparsing the schema or + ** (2) SQLITE_DBCONFIG_ENABLE_COMMENTS is turned on (the default). */ zSql += n; continue; }else if( tokenType!=TK_QNUMBER ){ @@ -181842,6 +182942,14 @@ SQLITE_API int sqlite3_initialize(void){ if( rc==SQLITE_OK ){ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); +#ifdef SQLITE_EXTRA_INIT_MUTEXED + { + int SQLITE_EXTRA_INIT_MUTEXED(const char*); + rc = SQLITE_EXTRA_INIT_MUTEXED(0); + } +#endif + } + if( rc==SQLITE_OK ){ sqlite3MemoryBarrier(); sqlite3GlobalConfig.isInit = 1; #ifdef SQLITE_EXTRA_INIT @@ -182298,17 +183406,22 @@ SQLITE_API int sqlite3_config(int op, ...){ ** If lookaside is already active, return SQLITE_BUSY. ** ** The sz parameter is the number of bytes in each lookaside slot. -** The cnt parameter is the number of slots. If pStart is NULL the -** space for the lookaside memory is obtained from sqlite3_malloc(). -** If pStart is not NULL then it is sz*cnt bytes of memory to use for -** the lookaside memory. +** The cnt parameter is the number of slots. If pBuf is NULL the +** space for the lookaside memory is obtained from sqlite3_malloc() +** or similar. If pBuf is not NULL then it is sz*cnt bytes of memory +** to use for the lookaside memory. */ -static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ +static int setupLookaside( + sqlite3 *db, /* Database connection being configured */ + void *pBuf, /* Memory to use for lookaside. 
May be NULL */ + int sz, /* Desired size of each lookaside memory slot */ + int cnt /* Number of slots to allocate */ +){ #ifndef SQLITE_OMIT_LOOKASIDE - void *pStart; - sqlite3_int64 szAlloc; - int nBig; /* Number of full-size slots */ - int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ + void *pStart; /* Start of the lookaside buffer */ + sqlite3_int64 szAlloc; /* Total space set aside for lookaside memory */ + int nBig; /* Number of full-size slots */ + int nSm; /* Number smaller LOOKASIDE_SMALL-byte slots */ if( sqlite3LookasideUsed(db,0)>0 ){ return SQLITE_BUSY; @@ -182321,19 +183434,22 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ sqlite3_free(db->lookaside.pStart); } /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger - ** than a pointer to be useful. + ** than a pointer and small enough to fit in a u16. */ - sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ + sz = ROUNDDOWN8(sz); if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; if( sz>65528 ) sz = 65528; - if( cnt<0 ) cnt = 0; + /* Count must be at least 1 to be useful, but not so large as to use + ** more than 0x7fff0000 total bytes for lookaside. */ + if( cnt<1 ) cnt = 0; + if( sz>0 && cnt>(0x7fff0000/sz) ) cnt = 0x7fff0000/sz; szAlloc = (i64)sz*(i64)cnt; - if( sz==0 || cnt==0 ){ + if( szAlloc==0 ){ sz = 0; pStart = 0; }else if( pBuf==0 ){ sqlite3BeginBenignMalloc(); - pStart = sqlite3Malloc( szAlloc ); /* IMP: R-61949-35727 */ + pStart = sqlite3Malloc( szAlloc ); sqlite3EndBenignMalloc(); if( pStart ) szAlloc = sqlite3MallocSize(pStart); }else{ @@ -183310,6 +184426,9 @@ SQLITE_API int sqlite3_busy_handler( db->busyHandler.pBusyArg = pArg; db->busyHandler.nBusy = 0; db->busyTimeout = 0; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + db->setlkTimeout = 0; +#endif sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } @@ -183359,12 +184478,49 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ sqlite3_busy_handler(db, (int(*)(void*,int))sqliteDefaultBusyCallback, (void*)db); db->busyTimeout = ms; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + db->setlkTimeout = ms; +#endif }else{ sqlite3_busy_handler(db, 0, 0); } return SQLITE_OK; } +/* +** Set the setlk timeout value. +*/ +SQLITE_API int sqlite3_setlk_timeout(sqlite3 *db, int ms, int flags){ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int iDb; + int bBOC = ((flags & SQLITE_SETLK_BLOCK_ON_CONNECT) ? 1 : 0); +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + if( ms<-1 ) return SQLITE_RANGE; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3_mutex_enter(db->mutex); + db->setlkTimeout = ms; + db->setlkFlags = flags; + sqlite3BtreeEnterAll(db); + for(iDb=0; iDbnDb; iDb++){ + Btree *pBt = db->aDb[iDb].pBt; + if( pBt ){ + sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pBt)); + sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, (void*)&bBOC); + } + } + sqlite3BtreeLeaveAll(db); + sqlite3_mutex_leave(db->mutex); +#endif +#if !defined(SQLITE_ENABLE_API_ARMOR) && !defined(SQLITE_ENABLE_SETLK_TIMEOUT) + UNUSED_PARAMETER(db); + UNUSED_PARAMETER(flags); +#endif + return SQLITE_OK; +} + /* ** Cause any pending operation to stop at its earliest opportunity. 
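A plausible usage sketch for the sqlite3_setlk_timeout() interface added above. Per the implementation, values below -1 yield SQLITE_RANGE, the timeout only takes effect in builds compiled with SQLITE_ENABLE_SETLK_TIMEOUT (otherwise the call is a harmless no-op), and SQLITE_SETLK_BLOCK_ON_CONNECT additionally asks the VFS to block while taking the locks needed on first connection:

#include "sqlite3.h"

static int configureLockWaits(sqlite3 *db){
  /* Wait up to 5 seconds for contested file locks; also block while
  ** acquiring the initial locks rather than failing with SQLITE_BUSY. */
  int rc = sqlite3_setlk_timeout(db, 5000, SQLITE_SETLK_BLOCK_ON_CONNECT);
  if( rc==SQLITE_RANGE ){
    /* not reachable here: SQLITE_RANGE is only returned when ms < -1 */
  }
  return rc;
}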
*/ @@ -185330,7 +186486,7 @@ SQLITE_API int sqlite3_set_clientdata( return SQLITE_OK; }else{ size_t n = strlen(zName); - p = sqlite3_malloc64( sizeof(DbClientData)+n+1 ); + p = sqlite3_malloc64( SZ_DBCLIENTDATA(n+1) ); if( p==0 ){ if( xDestructor ) xDestructor(pData); sqlite3_mutex_leave(db->mutex); @@ -185484,13 +186640,10 @@ SQLITE_API int sqlite3_table_column_metadata( if( zColumnName==0 ){ /* Query for existence of table only */ }else{ - for(iCol=0; iColnCol; iCol++){ + iCol = sqlite3ColumnIndex(pTab, zColumnName); + if( iCol>=0 ){ pCol = &pTab->aCol[iCol]; - if( 0==sqlite3StrICmp(pCol->zCnName, zColumnName) ){ - break; - } - } - if( iCol==pTab->nCol ){ + }else{ if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){ iCol = pTab->iPKey; pCol = iCol>=0 ? &pTab->aCol[iCol] : 0; @@ -185699,8 +186852,8 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b); ** ** If b is true, then activate the SQLITE_FkNoAction setting. If b is - ** false then clearn that setting. If the SQLITE_FkNoAction setting is - ** abled, all foreign key ON DELETE and ON UPDATE actions behave as if + ** false then clear that setting. If the SQLITE_FkNoAction setting is + ** enabled, all foreign key ON DELETE and ON UPDATE actions behave as if ** they were NO ACTION, regardless of how they are defined. ** ** NB: One must usually run "PRAGMA writable_schema=RESET" after @@ -187047,7 +188200,7 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. A "position" is an index of a token in the token stream ** generated by the tokenizer. Note that POS_END and POS_COLUMN occur -** in the same logical place as the position element, and act as sentinals +** in the same logical place as the position element, and act as sentinels ** ending a position list array. POS_END is 0. POS_COLUMN is 1. ** The positions numbers are not stored literally but rather as two more ** than the difference from the prior position, or the just the position plus @@ -187266,6 +188419,13 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ #ifndef _FTSINT_H #define _FTSINT_H +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ + #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif @@ -187735,6 +188895,19 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ #define deliberate_fall_through +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + + #endif /* SQLITE_AMALGAMATION */ #ifdef SQLITE_DEBUG @@ -187839,7 +189012,7 @@ struct Fts3Table { #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - /* True to disable the incremental doclist optimization. This is controled + /* True to disable the incremental doclist optimization. This is controlled ** by special insert command 'test-no-incr-doclist'. */ int bNoIncrDoclist; @@ -187891,7 +189064,7 @@ struct Fts3Cursor { /* ** The Fts3Cursor.eSearch member is always set to one of the following. -** Actualy, Fts3Cursor.eSearch can be greater than or equal to +** Actually, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. 
For example, in ** @@ -187964,9 +189137,13 @@ struct Fts3Phrase { */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ + Fts3PhraseToken aToken[FLEXARRAY]; /* One for each token in the phrase */ }; +/* Size (in bytes) of an Fts3Phrase object large enough to hold N tokens */ +#define SZ_FTS3PHRASE(N) \ + (offsetof(Fts3Phrase,aToken)+(N)*sizeof(Fts3PhraseToken)) + /* ** A tree of these objects forms the RHS of a MATCH operator. ** @@ -188200,12 +189377,6 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); # define SQLITE_CORE 1 #endif -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ /* #include "fts3.h" */ #ifndef SQLITE_CORE @@ -190544,7 +191715,7 @@ static int fts3DoclistOrMerge( ** sizes of the two inputs, plus enough space for exactly one of the input ** docids to grow. ** - ** A symetric argument may be made if the doclists are in descending + ** A symmetric argument may be made if the doclists are in descending ** order. */ aOut = sqlite3_malloc64((i64)n1+n2+FTS3_VARINT_MAX-1+FTS3_BUFFER_PADDING); @@ -192343,7 +193514,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ nDistance = iPrev - nMaxUndeferred; } - aOut = (char *)sqlite3Fts3MallocZero(nPoslist+FTS3_BUFFER_PADDING); + aOut = (char *)sqlite3Fts3MallocZero(((i64)nPoslist)+FTS3_BUFFER_PADDING); if( !aOut ){ sqlite3_free(aPoslist); return SQLITE_NOMEM; @@ -192642,7 +193813,7 @@ static int incrPhraseTokenNext( ** ** * does not contain any deferred tokens. ** -** Advance it to the next matching documnent in the database and populate +** Advance it to the next matching document in the database and populate ** the Fts3Doclist.pList and nList fields. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to @@ -193649,7 +194820,7 @@ static int fts3EvalNext(Fts3Cursor *pCsr){ } /* -** Restart interation for expression pExpr so that the next call to +** Restart iteration for expression pExpr so that the next call to ** fts3EvalNext() visits the first row. Do not allow incremental ** loading or merging of phrase doclists for this iteration. ** @@ -194841,6 +196012,23 @@ SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer( */ static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); +/* +** Search buffer z[], size n, for a '"' character. Or, if enable_parenthesis +** is defined, search for '(' and ')' as well. Return the index of the first +** such character in the buffer. If there is no such character, return -1. +*/ +static int findBarredChar(const char *z, int n){ + int ii; + for(ii=0; iiiLangid, z, i, &pCursor); + *pnConsumed = n; + rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, n, &pCursor); if( rc==SQLITE_OK ){ const char *zToken; int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0; @@ -194882,7 +196063,18 @@ static int getNextToken( rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ - nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; + /* Check that this tokenization did not gobble up any " characters. Or, + ** if enable_parenthesis is true, that it did not gobble up any + ** open or close parenthesis characters either. If it did, call + ** getNextToken() again, but pass only that part of the input buffer + ** up to the first such character. 
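The doclist notes earlier record each token position as two more than the delta from the prior position, keeping the values 0 and 1 free to serve as the POS_END and POS_COLUMN sentinels. A sketch of an encoder under that reading; putVarint() below is a simple little-endian 7-bit varint writer standing in for the FTS3 varint routine (assumed format):

typedef unsigned char u8;
typedef unsigned long long u64;

/* Hypothetical stand-in varint writer: 7 bits per byte, high bit set
** on all but the final byte. */
static u8 *putVarint(u8 *p, u64 v){
  while( v>0x7f ){ *p++ = (u8)((v&0x7f)|0x80); v >>= 7; }
  *p++ = (u8)v;
  return p;
}

static u8 *encodePoslist(u8 *pOut, const int *aPos, int nPos){
  int iPrev = 0;
  for(int i=0; i<nPos; i++){
    pOut = putVarint(pOut, (u64)(aPos[i]-iPrev)+2);  /* delta plus 2 */
    iPrev = aPos[i];
  }
  return putVarint(pOut, 0);        /* 0 == POS_END terminator */
}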
*/ + int iBarred = findBarredChar(z, iEnd); + if( iBarred>=0 ){ + pModule->xClose(pCursor); + return getNextToken(pParse, iCol, z, iBarred, ppExpr, pnConsumed); + } + + nByte = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1) + nToken; pRet = (Fts3Expr *)sqlite3Fts3MallocZero(nByte); if( !pRet ){ rc = SQLITE_NOMEM; @@ -194892,7 +196084,7 @@ static int getNextToken( pRet->pPhrase->nToken = 1; pRet->pPhrase->iColumn = iCol; pRet->pPhrase->aToken[0].n = nToken; - pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; + pRet->pPhrase->aToken[0].z = (char*)&pRet->pPhrase->aToken[1]; memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); if( iEnd=0 ){ + *pnConsumed = iBarred; + } rc = SQLITE_OK; } @@ -194963,9 +196159,9 @@ static int getNextString( Fts3Expr *p = 0; sqlite3_tokenizer_cursor *pCursor = 0; char *zTemp = 0; - int nTemp = 0; + i64 nTemp = 0; - const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + const int nSpace = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1); int nToken = 0; /* The final Fts3Expr data structure, including the Fts3Phrase, @@ -195337,7 +196533,7 @@ static int fts3ExprParse( /* The isRequirePhrase variable is set to true if a phrase or ** an expression contained in parenthesis is required. If a - ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** binary operator (AND, OR, NOT or NEAR) is encountered when ** isRequirePhrase is set, this is a syntax error. */ if( !isPhrase && isRequirePhrase ){ @@ -195919,7 +197115,6 @@ static void fts3ExprTestCommon( } if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - sqlite3Fts3ExprFree(pExpr); sqlite3_result_error(context, "Error parsing expression", -1); }else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){ sqlite3_result_error_nomem(context); @@ -196162,7 +197357,7 @@ static void fts3HashInsertElement( } -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** "new_size" must be a power of 2. The hash table might fail ** to resize if sqliteMalloc() fails. ** @@ -196617,7 +197812,7 @@ static int star_oh(const char *z){ /* ** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the +** of the word that precedes the zFrom ending, then change the ** ending to zTo. ** ** The input word *pz and zFrom are both in reverse order. zTo @@ -198128,7 +199323,7 @@ static int fts3tokFilterMethod( fts3tokResetCursor(pCsr); if( idxNum==1 ){ const char *zByte = (const char *)sqlite3_value_text(apVal[0]); - int nByte = sqlite3_value_bytes(apVal[0]); + sqlite3_int64 nByte = sqlite3_value_bytes(apVal[0]); pCsr->zInput = sqlite3_malloc64(nByte+1); if( pCsr->zInput==0 ){ rc = SQLITE_NOMEM; @@ -202200,7 +203395,7 @@ static int fts3IncrmergePush( ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev -** is extended by this function if requrired. +** is extended by this function if required. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. @@ -203863,7 +205058,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferToken( /* ** SQLite value pRowid contains the rowid of a row that may or may not be ** present in the FTS3 table. If it is, delete it and adjust the contents -** of subsiduary data structures accordingly. +** of subsidiary data structures accordingly. 
*/ static int fts3DeleteByRowid( Fts3Table *p, @@ -204189,9 +205384,13 @@ struct MatchinfoBuffer { int nElem; int bGlobal; /* Set if global data is loaded */ char *zMatchinfo; - u32 aMatchinfo[1]; + u32 aMI[FLEXARRAY]; }; +/* Size (in bytes) of a MatchinfoBuffer sufficient for N elements */ +#define SZ_MATCHINFOBUFFER(N) \ + (offsetof(MatchinfoBuffer,aMI)+(((N)+1)/2)*sizeof(u64)) + /* ** The snippet() and offsets() functions both return text values. An instance @@ -204216,13 +205415,13 @@ struct StrBuffer { static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ MatchinfoBuffer *pRet; sqlite3_int64 nByte = sizeof(u32) * (2*(sqlite3_int64)nElem + 1) - + sizeof(MatchinfoBuffer); + + SZ_MATCHINFOBUFFER(1); sqlite3_int64 nStr = strlen(zMatchinfo); pRet = sqlite3Fts3MallocZero(nByte + nStr+1); if( pRet ){ - pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; - pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + pRet->aMI[0] = (u8*)(&pRet->aMI[1]) - (u8*)pRet; + pRet->aMI[1+nElem] = pRet->aMI[0] + sizeof(u32)*((int)nElem+1); pRet->nElem = (int)nElem; pRet->zMatchinfo = ((char*)pRet) + nByte; @@ -204236,10 +205435,10 @@ static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ static void fts3MIBufferFree(void *p){ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); - assert( (u32*)p==&pBuf->aMatchinfo[1] - || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] + assert( (u32*)p==&pBuf->aMI[1] + || (u32*)p==&pBuf->aMI[pBuf->nElem+2] ); - if( (u32*)p==&pBuf->aMatchinfo[1] ){ + if( (u32*)p==&pBuf->aMI[1] ){ pBuf->aRef[1] = 0; }else{ pBuf->aRef[2] = 0; @@ -204256,18 +205455,18 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ if( p->aRef[1]==0 ){ p->aRef[1] = 1; - aOut = &p->aMatchinfo[1]; + aOut = &p->aMI[1]; xRet = fts3MIBufferFree; } else if( p->aRef[2]==0 ){ p->aRef[2] = 1; - aOut = &p->aMatchinfo[p->nElem+2]; + aOut = &p->aMI[p->nElem+2]; xRet = fts3MIBufferFree; }else{ aOut = (u32*)sqlite3_malloc64(p->nElem * sizeof(u32)); if( aOut ){ xRet = sqlite3_free; - if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); + if( p->bGlobal ) memcpy(aOut, &p->aMI[1], p->nElem*sizeof(u32)); } } @@ -204277,7 +205476,7 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ p->bGlobal = 1; - memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); + memcpy(&p->aMI[2+p->nElem], &p->aMI[1], p->nElem*sizeof(u32)); } /* @@ -204692,7 +205891,7 @@ static int fts3StringAppend( } /* If there is insufficient space allocated at StrBuffer.z, use realloc() - ** to grow the buffer until so that it is big enough to accomadate the + ** to grow the buffer until so that it is big enough to accommodate the ** appended data. 
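The aMI[] bookkeeping in fts3MIBufferNew()/fts3MIBufferFree() above works because the u32 immediately preceding each handed-out slot stores that slot's byte offset from the start of the allocation, letting the container be recovered from the bare pointer via ((u32*)p)[-1]. A reduced sketch of the trick with a hypothetical structure:

#include <stdlib.h>

typedef unsigned int u32;
typedef struct Buf { int nElem; u32 a[]; } Buf;

static Buf *bufNew(int nElem){
  Buf *p = calloc(1, sizeof(Buf) + (size_t)(nElem+1)*sizeof(u32));
  if( p ){
    p->nElem = nElem;
    p->a[0] = (u32)((char*)&p->a[1] - (char*)p);  /* offset of the slot */
  }
  return p;
}
static u32 *bufSlot(Buf *p){ return &p->a[1]; }   /* pointer handed out */
static Buf *bufContainerOf(u32 *pSlot){
  return (Buf*)((char*)pSlot - pSlot[-1]);        /* read offset behind it */
}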
*/ if( pStr->n+nAppend+1>=pStr->nAlloc ){ @@ -205104,16 +206303,16 @@ static size_t fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ break; case FTS3_MATCHINFO_LHITS: - nVal = pInfo->nCol * pInfo->nPhrase; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase; break; case FTS3_MATCHINFO_LHITS_BM: - nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32); + nVal = (size_t)pInfo->nPhrase * ((pInfo->nCol + 31) / 32); break; default: assert( cArg==FTS3_MATCHINFO_HITS ); - nVal = pInfo->nCol * pInfo->nPhrase * 3; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase * 3; break; } @@ -206671,8 +207870,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** Beginning with version 3.45.0 (circa 2024-01-01), these routines also ** accept BLOB values that have JSON encoded using a binary representation ** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk -** format SQLite JSONB is completely different and incompatible with -** PostgreSQL JSONB. +** format for SQLite-JSONB is completely different and incompatible with +** PostgreSQL-JSONB. ** ** Decoding and interpreting JSONB is still O(N) where N is the size of ** the input, the same as text JSON. However, the constant of proportionality @@ -206729,7 +207928,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ** The payload size need not be expressed in its minimal form. For example, ** if the payload size is 10, the size can be expressed in any of 5 different -** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte, +** ways: (1) (X>>4)==10, (2) (X>>4)==12 followed by one 0x0a byte, ** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by ** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and ** a single byte of 0x0a. The shorter forms are preferred, of course, but @@ -206739,7 +207938,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** the size when it becomes known, resulting in a non-minimal encoding. ** ** The value (X>>4)==15 is not actually used in the current implementation -** (as SQLite is currently unable handle BLOBs larger than about 2GB) +** (as SQLite is currently unable to handle BLOBs larger than about 2GB) ** but is included in the design to allow for future enhancements. ** ** The payload follows the header. NULL, TRUE, and FALSE have no payload and @@ -206799,23 +207998,47 @@ static const char * const jsonbType[] = { ** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
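Following the header rules described above, a sketch of a minimal-form JSONB header encoder: payload sizes 0..11 go directly in the upper nibble of the first byte, and nibble codes 12, 13, and 14 append 1, 2, or 4 big-endian size bytes (the reserved code 15 with 8 size bytes is omitted, matching the note that it is unused):

typedef unsigned char u8;
typedef unsigned int u32;

/* Write the header for an element of type eType (low nibble) with
** szPayload bytes of payload into aOut[]; return the header length. */
static int jsonbMinimalHeader(u8 *aOut, u8 eType, u32 szPayload){
  if( szPayload<=11 ){
    aOut[0] = (u8)(eType | (szPayload<<4));
    return 1;
  }
  if( szPayload<=0xff ){
    aOut[0] = (u8)(eType | (12<<4));
    aOut[1] = (u8)szPayload;
    return 2;
  }
  if( szPayload<=0xffff ){
    aOut[0] = (u8)(eType | (13<<4));
    aOut[1] = (u8)(szPayload>>8);
    aOut[2] = (u8)szPayload;
    return 3;
  }
  aOut[0] = (u8)(eType | (14<<4));
  aOut[1] = (u8)(szPayload>>24);
  aOut[2] = (u8)(szPayload>>16);
  aOut[3] = (u8)(szPayload>>8);
  aOut[4] = (u8)szPayload;
  return 5;
}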
*/ static const char jsonIsSpace[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif + }; #define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) @@ -206823,7 +208046,13 @@ static const char jsonIsSpace[] = { ** The set of all space characters recognized by jsonIsspace(). ** Useful as the second argument to strspn(). */ +#ifdef SQLITE_ASCII static const char jsonSpaces[] = "\011\012\015\040"; +#endif +#ifdef SQLITE_EBCDIC +static const char jsonSpaces[] = "\005\045\015\100"; +#endif + /* ** Characters that are special to JSON. Control characters, @@ -206832,23 +208061,46 @@ static const char jsonSpaces[] = "\011\012\015\040"; ** it in the set of special characters. 
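As the comment above notes, jsonSpaces is shaped for use as the second argument to strspn(); a small sketch for the ASCII build (an EBCDIC build would use the "\005\045\015\100" table instead):

#include <string.h>

static const char zJsonSpaces[] = "\011\012\015\040";  /* tab, LF, CR, space */

static const char *skipJsonSpace(const char *z){
  return z + strspn(z, zJsonSpaces);  /* advance past leading whitespace */
}
/* skipJsonSpace(" \t {\"a\":1}") returns a pointer to the '{'. */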
*/ static const char jsonIsOk[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, /* 2 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif }; /* Objects */ @@ -206993,7 +208245,7 @@ struct JsonParse { ** Forward references **************************************************************************/ static void jsonReturnStringAsBlob(JsonString*); -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson); +static int jsonArgIsJsonb(sqlite3_value *pJson, JsonParse *p); static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*); static void jsonReturnParse(sqlite3_context*,JsonParse*); static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32); @@ -207067,7 +208319,7 @@ static int jsonCacheInsert( ** most-recently used entry if it isn't so already. 
** ** The JsonParse object returned still belongs to the Cache and might -** be deleted at any moment. If the caller whants the JsonParse to +** be deleted at any moment. If the caller wants the JsonParse to ** linger, it needs to increment the nPJRef reference counter. */ static JsonParse *jsonCacheSearch( @@ -207411,11 +208663,9 @@ static void jsonAppendSqlValue( break; } default: { - if( jsonFuncArgMightBeBinary(pValue) ){ - JsonParse px; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(pValue); - px.nBlob = sqlite3_value_bytes(pValue); + JsonParse px; + memset(&px, 0, sizeof(px)); + if( jsonArgIsJsonb(pValue, &px) ){ jsonTranslateBlobToText(&px, 0, p); }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); @@ -207734,7 +208984,7 @@ static void jsonWrongNumArgs( */ static int jsonBlobExpand(JsonParse *pParse, u32 N){ u8 *aNew; - u32 t; + u64 t; assert( N>pParse->nBlobAlloc ); if( pParse->nBlobAlloc==0 ){ t = 100; @@ -207744,8 +208994,9 @@ static int jsonBlobExpand(JsonParse *pParse, u32 N){ if( tdb, pParse->aBlob, t); if( aNew==0 ){ pParse->oom = 1; return 1; } + assert( t<0x7fffffff ); pParse->aBlob = aNew; - pParse->nBlobAlloc = t; + pParse->nBlobAlloc = (u32)t; return 0; } @@ -207812,7 +209063,7 @@ static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( } -/* Append an node type byte together with the payload size and +/* Append a node type byte together with the payload size and ** possibly also the payload. ** ** If aPayload is not NULL, then it is a pointer to the payload which @@ -207881,8 +209132,10 @@ static int jsonBlobChangePayloadSize( nExtra = 1; }else if( szType==13 ){ nExtra = 2; - }else{ + }else if( szType==14 ){ nExtra = 4; + }else{ + nExtra = 8; } if( szPayload<=11 ){ nNeeded = 0; @@ -208352,7 +209605,12 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ || c=='n' || c=='r' || c=='t' || (c=='u' && jsonIs4Hex(&z[j+1])) ){ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; - }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + }else if( c=='\'' || c=='v' || c=='\n' +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + || (c=='0') /* Legacy bug compatible */ +#else + || (c=='0' && !sqlite3Isdigit(z[j+1])) /* Correct implementation */ +#endif || (0xe2==(u8)c && 0x80==(u8)z[j+1] && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) || (c=='x' && jsonIs2Hex(&z[j+1])) ){ @@ -208702,10 +209960,7 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ u8 x; u32 sz; u32 n; - if( NEVER(i>pParse->nBlob) ){ - *pSz = 0; - return 0; - } + assert( i<=pParse->nBlob ); x = pParse->aBlob[i]>>4; if( x<=11 ){ sz = x; @@ -208742,15 +209997,15 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ *pSz = 0; return 0; } - sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + sz = ((u32)pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; n = 9; } if( (i64)i+sz+n > pParse->nBlob && (i64)i+sz+n > pParse->nBlob-pParse->delta ){ - sz = 0; - n = 0; + *pSz = 0; + return 0; } *pSz = sz; return n; @@ -208847,9 +210102,12 @@ static u32 jsonTranslateBlobToText( } case JSONB_TEXT: case JSONB_TEXTJ: { - jsonAppendChar(pOut, '"'); - jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); - jsonAppendChar(pOut, '"'); + if( pOut->nUsed+sz+2<=pOut->nAlloc || jsonStringGrow(pOut, sz+2)==0 ){ + pOut->zBuf[pOut->nUsed] = '"'; + memcpy(pOut->zBuf+pOut->nUsed+1,(const char*)&pParse->aBlob[i+n],sz); + pOut->zBuf[pOut->nUsed+sz+1] = '"'; + pOut->nUsed += sz+2; + } break; } case 
JSONB_TEXT5: { @@ -209088,33 +210346,6 @@ static u32 jsonTranslateBlobToPrettyText( return i; } - -/* Return true if the input pJson -** -** For performance reasons, this routine does not do a detailed check of the -** input BLOB to ensure that it is well-formed. Hence, false positives are -** possible. False negatives should never occur, however. -*/ -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){ - u32 sz, n; - const u8 *aBlob; - int nBlob; - JsonParse s; - if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0; - aBlob = sqlite3_value_blob(pJson); - nBlob = sqlite3_value_bytes(pJson); - if( nBlob<1 ) return 0; - if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0; - memset(&s, 0, sizeof(s)); - s.aBlob = (u8*)aBlob; - s.nBlob = nBlob; - n = jsonbPayloadSize(&s, 0, &sz); - if( n==0 ) return 0; - if( sz+n!=(u32)nBlob ) return 0; - if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0; - return sz+n==(u32)nBlob; -} - /* ** Given that a JSONB_ARRAY object starts at offset i, return ** the number of entries in that array. @@ -209147,6 +210378,82 @@ static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz); } +/* +** If the JSONB at aIns[0..nIns-1] can be expanded (by denormalizing the +** size field) by d bytes, then write the expansion into aOut[] and +** return true. In this way, an overwrite happens without changing the +** size of the JSONB, which reduces memcpy() operations and also makes it +** faster and easier to update the B-Tree entry that contains the JSONB +** in the database. +** +** If the expansion of aIns[] by d bytes cannot be (easily) accomplished +** then return false. +** +** The d parameter is guaranteed to be between 1 and 8. +** +** This routine is an optimization. A correct answer is obtained if it +** always leaves the output unchanged and returns false.
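The JSONB_TEXT/JSONB_TEXTJ fast path at the top of this hunk replaces three bounds-checked append calls with one capacity check followed by direct stores. A reduced sketch of that pattern over a hypothetical string buffer:

#include <stdlib.h>
#include <string.h>

typedef struct Str { char *z; unsigned n, nAlloc; } Str;

static int strGrow(Str *p, unsigned nExtra){  /* returns 0 on success */
  unsigned nReq = p->n + nExtra;
  char *zNew;
  if( nReq <= p->nAlloc ) return 0;
  zNew = realloc(p->z, (size_t)nReq*2 + 64);
  if( zNew==0 ) return 1;
  p->z = zNew;
  p->nAlloc = nReq*2 + 64;
  return 0;
}

static void appendQuoted(Str *p, const char *zIn, unsigned sz){
  if( p->n+sz+2<=p->nAlloc || strGrow(p, sz+2)==0 ){
    p->z[p->n] = '"';               /* one check covers all three writes */
    memcpy(&p->z[p->n+1], zIn, sz);
    p->z[p->n+sz+1] = '"';
    p->n += sz+2;
  }
}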
+*/ +static int jsonBlobOverwrite( + u8 *aOut, /* Overwrite here */ + const u8 *aIns, /* New content */ + u32 nIns, /* Bytes of new content */ + u32 d /* Need to expand new content by this much */ +){ + u32 szPayload; /* Bytes of payload */ + u32 i; /* New header size, after expansion & a loop counter */ + u8 szHdr; /* Size of header before expansion */ + + /* Lookup table for finding the upper 4 bits of the first byte of the + ** expanded aIns[], based on the size of the expanded aIns[] header: + ** + ** 2 3 4 5 6 7 8 9 */ + static const u8 aType[] = { 0xc0, 0xd0, 0, 0xe0, 0, 0, 0, 0xf0 }; + + if( (aIns[0]&0x0f)<=2 ) return 0; /* Cannot enlarge NULL, true, false */ + switch( aIns[0]>>4 ){ + default: { /* aIns[] header size 1 */ + if( ((1<=2 && i<=9 && aType[i-2]!=0 ); + aOut[0] = (aIns[0] & 0x0f) | aType[i-2]; + memcpy(&aOut[i], &aIns[szHdr], nIns-szHdr); + szPayload = nIns - szHdr; + while( 1/*edit-by-break*/ ){ + i--; + aOut[i] = szPayload & 0xff; + if( i==1 ) break; + szPayload >>= 8; + } + assert( (szPayload>>8)==0 ); + return 1; +} + /* ** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of ** content beginning at iDel, and replacing them with nIns bytes of @@ -209168,6 +210475,11 @@ static void jsonBlobEdit( u32 nIns /* Bytes of content to insert */ ){ i64 d = (i64)nIns - (i64)nDel; + if( d<0 && d>=(-8) && aIns!=0 + && jsonBlobOverwrite(&pParse->aBlob[iDel], aIns, nIns, (int)-d) + ){ + return; + } if( d!=0 ){ if( pParse->nBlob + d > pParse->nBlobAlloc ){ jsonBlobExpand(pParse, pParse->nBlob+d); @@ -209179,7 +210491,9 @@ static void jsonBlobEdit( pParse->nBlob += d; pParse->delta += d; } - if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns); + if( nIns && aIns ){ + memcpy(&pParse->aBlob[iDel], aIns, nIns); + } } /* @@ -209264,7 +210578,21 @@ static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ case 'r': { *piOut = '\r'; return 2; } case 't': { *piOut = '\t'; return 2; } case 'v': { *piOut = '\v'; return 2; } - case '0': { *piOut = 0; return 2; } + case '0': { + /* JSON5 requires that the \0 escape not be followed by a digit. + ** But SQLite did not enforce this restriction in versions 3.42.0 + ** through 3.49.2. That was a bug. But some applications might have + ** come to depend on that bug. Use the SQLITE_BUG_COMPATIBLE_20250510 + ** option to restore the old buggy behavior. */ +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + /* Legacy bug-compatible behavior */ + *piOut = 0; +#else + /* Correct behavior */ + *piOut = (n>2 && sqlite3Isdigit(z[2])) ? JSON_INVALID_CHAR : 0; +#endif + return 2; + } case '\'': case '"': case '/': @@ -209764,7 +211092,7 @@ static void jsonReturnFromBlob( char *zOut; u32 nOut = sz; z = (const char*)&pParse->aBlob[i+n]; - zOut = sqlite3DbMallocRaw(db, nOut+1); + zOut = sqlite3DbMallocRaw(db, ((u64)nOut)+1); if( zOut==0 ) goto returnfromblob_oom; for(iIn=iOut=0; iInaBlob = (u8*)sqlite3_value_blob(pArg); - pParse->nBlob = sqlite3_value_bytes(pArg); - }else{ + if( !jsonArgIsJsonb(pArg, pParse) ){ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); return 1; } @@ -209942,7 +211267,7 @@ static char *jsonBadPathError( } /* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent -** arguments come in parse where each pair contains a JSON path and +** arguments come in pairs where each pair contains a JSON path and ** content to insert or set at that patch. Do the updates ** and return the result. 
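A reduced sketch of the corrected `\0` handling above: JSON5 accepts the escape as NUL only when the next character is not a decimal digit (a digit would make it read as a legacy octal escape), while SQLITE_BUG_COMPATIBLE_20250510 restores the old permissive behavior. INVALID_CHAR below is a hypothetical stand-in for SQLite's JSON_INVALID_CHAR sentinel; z points at the backslash and n is the number of bytes available:

#define INVALID_CHAR 0xfffd   /* hypothetical stand-in sentinel */

static unsigned int unescapeZero(const char *z, unsigned int n){
#ifdef BUG_COMPATIBLE
  (void)z; (void)n;
  return 0;                          /* legacy behavior: always NUL */
#else
  if( n>2 && z[2]>='0' && z[2]<='9' ) return INVALID_CHAR;
  return 0;                          /* NUL */
#endif
}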
** @@ -210013,27 +211338,46 @@ static void jsonInsertIntoBlob( /* ** If pArg is a blob that seems like a JSONB blob, then initialize ** p to point to that JSONB and return TRUE. If pArg does not seem like -** a JSONB blob, then return FALSE; -** -** This routine is only called if it is already known that pArg is a -** blob. The only open question is whether or not the blob appears -** to be a JSONB blob. +** a JSONB blob, then return FALSE. +** +** For small BLOBs (having no more than 7 bytes of payload) a full +** validity check is done. So for small BLOBs this routine only returns +** true if the value is guaranteed to be a valid JSONB. For larger BLOBs +** (8 byte or more of payload) only the size of the outermost element is +** checked to verify that the BLOB is superficially valid JSONB. +** +** A full JSONB validation is done on smaller BLOBs because those BLOBs might +** also be text JSON that has been incorrectly cast into a BLOB. +** (See tag-20240123-a and https://sqlite.org/forum/forumpost/012136abd5) +** If the BLOB is 9 bytes are larger, then it is not possible for the +** superficial size check done here to pass if the input is really text +** JSON so we do not need to look deeper in that case. +** +** Why we only need to do full JSONB validation for smaller BLOBs: +** +** The first byte of valid JSON text must be one of: '{', '[', '"', ' ', '\n', +** '\r', '\t', '-', or a digit '0' through '9'. Of these, only a subset +** can also be the first byte of JSONB: '{', '[', and digits '3' +** through '9'. In every one of those cases, the payload size is 7 bytes +** or less. So if we do full JSONB validation for every BLOB where the +** payload is less than 7 bytes, we will never get a false positive for +** JSONB on an input that is really text JSON. */ static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ u32 n, sz = 0; + u8 c; + if( sqlite3_value_type(pArg)!=SQLITE_BLOB ) return 0; p->aBlob = (u8*)sqlite3_value_blob(pArg); p->nBlob = (u32)sqlite3_value_bytes(pArg); - if( p->nBlob==0 ){ - p->aBlob = 0; - return 0; - } - if( NEVER(p->aBlob==0) ){ - return 0; - } - if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + if( p->nBlob>0 + && ALWAYS(p->aBlob!=0) + && ((c = p->aBlob[0]) & 0x0f)<=JSONB_OBJECT && (n = jsonbPayloadSize(p, 0, &sz))>0 && sz+n==p->nBlob - && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + && ((c & 0x0f)>JSONB_FALSE || sz==0) + && (sz>7 + || (c!=0x7b && c!=0x5b && !sqlite3Isdigit(c)) + || jsonbValidityCheck(p, 0, p->nBlob, 1)==0) ){ return 1; } @@ -210111,7 +211455,7 @@ static JsonParse *jsonParseFuncArg( ** JSON functions were suppose to work. From the beginning, blob was ** reserved for expansion and a blob value should have raised an error. ** But it did not, due to a bug. And many applications came to depend - ** upon this buggy behavior, espeically when using the CLI and reading + ** upon this buggy behavior, especially when using the CLI and reading ** JSON text using readfile(), which returns a blob. For this reason ** we will continue to support the bug moving forward. ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d @@ -211126,21 +212470,17 @@ static void jsonValidFunc( return; } case SQLITE_BLOB: { - if( jsonFuncArgMightBeBinary(argv[0]) ){ + JsonParse py; + memset(&py, 0, sizeof(py)); + if( jsonArgIsJsonb(argv[0], &py) ){ if( flags & 0x04 ){ /* Superficial checking only - accomplished by the - ** jsonFuncArgMightBeBinary() call above. */ + ** jsonArgIsJsonb() call above. */ res = 1; }else if( flags & 0x08 ){ /* Strict checking. 
Check by translating BLOB->TEXT->BLOB. If ** no errors occur, call that a "strict check". */ - JsonParse px; - u32 iErr; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(argv[0]); - px.nBlob = sqlite3_value_bytes(argv[0]); - iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); - res = iErr==0; + res = 0==jsonbValidityCheck(&py, 0, py.nBlob, 1); } break; } @@ -211198,9 +212538,7 @@ static void jsonErrorFunc( UNUSED_PARAMETER(argc); memset(&s, 0, sizeof(s)); s.db = sqlite3_context_db_handle(ctx); - if( jsonFuncArgMightBeBinary(argv[0]) ){ - s.aBlob = (u8*)sqlite3_value_blob(argv[0]); - s.nBlob = sqlite3_value_bytes(argv[0]); + if( jsonArgIsJsonb(argv[0], &s) ){ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); }else{ s.zJson = (char*)sqlite3_value_text(argv[0]); @@ -211361,18 +212699,20 @@ static void jsonObjectStep( UNUSED_PARAMETER(argc); pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ + z = (const char*)sqlite3_value_text(argv[0]); + n = sqlite3Strlen30(z); if( pStr->zBuf==0 ){ jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); - }else if( pStr->nUsed>1 ){ + }else if( pStr->nUsed>1 && z!=0 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - z = (const char*)sqlite3_value_text(argv[0]); - n = sqlite3Strlen30(z); - jsonAppendString(pStr, z, n); - jsonAppendChar(pStr, ':'); - jsonAppendSqlValue(pStr, argv[1]); + if( z!=0 ){ + jsonAppendString(pStr, z, n); + jsonAppendChar(pStr, ':'); + jsonAppendSqlValue(pStr, argv[1]); + } } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ @@ -211885,9 +213225,8 @@ static int jsonEachFilter( memset(&p->sParse, 0, sizeof(p->sParse)); p->sParse.nJPRef = 1; p->sParse.db = p->db; - if( jsonFuncArgMightBeBinary(argv[0]) ){ - p->sParse.nBlob = sqlite3_value_bytes(argv[0]); - p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + if( jsonArgIsJsonb(argv[0], &p->sParse) ){ + /* We have JSONB */ }else{ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); p->sParse.nJson = sqlite3_value_bytes(argv[0]); @@ -212181,6 +213520,8 @@ SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3 *db){ #endif SQLITE_PRIVATE int sqlite3GetToken(const unsigned char*,int*); /* In the SQLite core */ +/* #include */ + /* ** If building separately, we will need some setup that is normally ** found in sqliteInt.h @@ -212211,6 +213552,14 @@ typedef unsigned int u32; # define ALWAYS(X) (X) # define NEVER(X) (X) #endif +#ifndef offsetof +#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif #endif /* !defined(SQLITE_AMALGAMATION) */ /* Macro to check for 4-byte alignment. Only used inside of assert() */ @@ -212531,9 +213880,13 @@ struct RtreeMatchArg { RtreeGeomCallback cb; /* Info about the callback functions */ int nParam; /* Number of parameters to the SQL function */ sqlite3_value **apSqlParam; /* Original SQL parameter values */ - RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ + RtreeDValue aParam[FLEXARRAY]; /* Values for parameters to the SQL function */ }; +/* Size of an RtreeMatchArg object with N parameters */ +#define SZ_RTREEMATCHARG(N) \ + (offsetof(RtreeMatchArg,aParam)+(N)*sizeof(RtreeDValue)) + #ifndef MAX # define MAX(x,y) ((x) < (y) ? (y) : (x)) #endif @@ -214222,7 +215575,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ } /* -** Return the N-dimensional volumn of the cell stored in *p. 
+** Return the N-dimensional volume of the cell stored in *p. */ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ RtreeDValue area = (RtreeDValue)1; @@ -215988,7 +217341,7 @@ static sqlite3_stmt *rtreeCheckPrepare( /* ** The second and subsequent arguments to this function are a printf() ** style format string and arguments. This function formats the string and -** appends it to the report being accumuated in pCheck. +** appends it to the report being accumulated in pCheck. */ static void rtreeCheckAppendMsg(RtreeCheck *pCheck, const char *zFmt, ...){ va_list ap; @@ -217176,7 +218529,7 @@ static void geopolyBBoxFinal( ** Determine if point (x0,y0) is beneath line segment (x1,y1)->(x2,y2). ** Returns: ** -** +2 x0,y0 is on the line segement +** +2 x0,y0 is on the line segment ** ** +1 x0,y0 is beneath line segment ** @@ -217282,7 +218635,7 @@ static void geopolyWithinFunc( sqlite3_free(p2); } -/* Objects used by the overlap algorihm. */ +/* Objects used by the overlap algorithm. */ typedef struct GeoEvent GeoEvent; typedef struct GeoSegment GeoSegment; typedef struct GeoOverlap GeoOverlap; @@ -218329,8 +219682,7 @@ static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){ sqlite3_int64 nBlob; int memErr = 0; - nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue) - + nArg*sizeof(sqlite3_value*); + nBlob = SZ_RTREEMATCHARG(nArg) + nArg*sizeof(sqlite3_value*); pBlob = (RtreeMatchArg *)sqlite3_malloc64(nBlob); if( !pBlob ){ sqlite3_result_error_nomem(ctx); @@ -219425,7 +220777,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( ** ** "RBU" stands for "Resumable Bulk Update". As in a large database update ** transmitted via a wireless network to a mobile device. A transaction -** applied using this extension is hence refered to as an "RBU update". +** applied using this extension is hence referred to as an "RBU update". ** ** ** LIMITATIONS @@ -219722,7 +221074,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_open( ** the next call to sqlite3rbu_vacuum() opens a handle that starts a ** new RBU vacuum operation. ** -** As with sqlite3rbu_open(), Zipvfs users should rever to the comment +** As with sqlite3rbu_open(), Zipvfs users should refer to the comment ** describing the sqlite3rbu_create_vfs() API function below for ** a description of the complications associated with using RBU with ** zipvfs databases. @@ -219818,7 +221170,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu); ** ** If the RBU update has been completely applied, mark the RBU database ** as fully applied. Otherwise, assuming no error has occurred, save the -** current state of the RBU update appliation to the RBU database. +** current state of the RBU update application to the RBU database. ** ** If an error has already occurred as part of an sqlite3rbu_step() ** or sqlite3rbu_open() call, or if one occurs within this function, an @@ -224744,7 +226096,7 @@ static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ /* If this is an RBU vacuum operation and this is the target database, ** pretend that it has at least one page. Otherwise, SQLite will not - ** check for the existance of a *-wal file. rbuVfsRead() contains + ** check for the existence of a *-wal file. rbuVfsRead() contains ** similar logic. */ if( rc==SQLITE_OK && *pSize==0 && p->pRbu && rbuIsVacuum(p->pRbu) @@ -226676,8 +228028,8 @@ static int dbpageUpdate( /* "INSERT INTO dbpage($PGNO,NULL)" causes page number $PGNO and ** all subsequent pages to be deleted. 
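A large share of the SQLite churn in this bump is mechanical: trailing `aFoo[1]` arrays become C99 flexible array members via the new FLEXARRAY macro, and each `sizeof(Struct) + (n-1)*sizeof(elem)` computation becomes an offsetof()-based SZ_* macro such as SZ_RTREEMATCHARG above. A minimal standalone rendering of the idiom (IntVec and SZ_INTVEC are illustrative names, not from the amalgamation):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Fixed prefix plus a trailing array sized at allocation time.  With a
** C99 compiler the member is declared with no bound; pre-C99 builds
** fall back to a length-1 array, which is why FLEXARRAY above expands
** to nothing or to 1. */
typedef struct IntVec IntVec;
struct IntVec {
  int n;            /* Number of used entries in a[] */
  int a[];          /* C99 flexible array member (FLEXARRAY slot) */
};

/* Size in bytes of an IntVec holding N entries.  offsetof() is used
** instead of sizeof(IntVec) so the result is exact whether the trailing
** member is declared as a[] or as a[1]. */
#define SZ_INTVEC(N) (offsetof(IntVec,a) + (size_t)(N)*sizeof(int))

int main(void){
  IntVec *p = malloc(SZ_INTVEC(3));
  if( p==0 ) return 1;
  p->n = 3;
  p->a[0] = 10; p->a[1] = 20; p->a[2] = 30;
  printf("bytes for 3 entries: %zu\n", SZ_INTVEC(3));
  free(p);
  return 0;
}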
*/ pTab->iDbTrunc = iDb; - pgno--; - pTab->pgnoTrunc = pgno; + pTab->pgnoTrunc = pgno-1; + pgno = 1; }else{ zErr = "bad page value"; goto update_fail; @@ -227974,7 +229326,7 @@ static int sessionTableInfo( /* ** This function is called to initialize the SessionTable.nCol, azCol[] ** abPK[] and azDflt[] members of SessionTable object pTab. If these -** fields are already initilialized, this function is a no-op. +** fields are already initialized, this function is a no-op. ** ** If an error occurs, an error code is stored in sqlite3_session.rc and ** non-zero returned. Or, if no error occurs but the table has no primary @@ -227993,6 +229345,8 @@ static int sessionInitTable( if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); + sqlite3_free(pTab->azCol); + pTab->abPK = 0; rc = sessionTableInfo(pSession, db, zDb, pTab->zName, &pTab->nCol, &pTab->nTotalCol, 0, &pTab->azCol, &pTab->azDflt, &pTab->aiIdx, &abPK, @@ -229000,7 +230354,9 @@ SQLITE_API int sqlite3session_diff( SessionTable *pTo; /* Table zTbl */ /* Locate and if necessary initialize the target table object */ + pSession->bAutoAttach++; rc = sessionFindTable(pSession, zTbl, &pTo); + pSession->bAutoAttach--; if( pTo==0 ) goto diff_out; if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){ rc = pSession->rc; @@ -229011,17 +230367,43 @@ SQLITE_API int sqlite3session_diff( if( rc==SQLITE_OK ){ int bHasPk = 0; int bMismatch = 0; - int nCol; /* Columns in zFrom.zTbl */ + int nCol = 0; /* Columns in zFrom.zTbl */ int bRowid = 0; - u8 *abPK; + u8 *abPK = 0; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, - &nCol, 0, 0, &azCol, 0, 0, &abPK, - pSession->bImplicitPK ? &bRowid : 0 - ); + char *zDbExists = 0; + + /* Check that database zFrom is attached. */ + zDbExists = sqlite3_mprintf("SELECT * FROM %Q.sqlite_schema", zFrom); + if( zDbExists==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_stmt *pDbExists = 0; + rc = sqlite3_prepare_v2(db, zDbExists, -1, &pDbExists, 0); + if( rc==SQLITE_ERROR ){ + rc = SQLITE_OK; + nCol = -1; + } + sqlite3_finalize(pDbExists); + sqlite3_free(zDbExists); + } + + if( rc==SQLITE_OK && nCol==0 ){ + rc = sessionTableInfo(0, db, zFrom, zTbl, + &nCol, 0, 0, &azCol, 0, 0, &abPK, + pSession->bImplicitPK ? &bRowid : 0 + ); + } if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ - bMismatch = 1; + if( nCol<=0 ){ + rc = SQLITE_SCHEMA; + if( pzErrMsg ){ + *pzErrMsg = sqlite3_mprintf("no such table: %s.%s", zFrom, zTbl); + } + }else{ + bMismatch = 1; + } }else{ int i; for(i=0; idb; /* Source database handle */ SessionTable *pTab; /* Used to iterate through attached tables */ - SessionBuffer buf = {0,0,0}; /* Buffer in which to accumlate changeset */ + SessionBuffer buf = {0,0,0}; /* Buffer in which to accumulate changeset */ int rc; /* Return code */ assert( xOutput==0 || (pnChangeset==0 && ppChangeset==0) ); @@ -230150,14 +231532,15 @@ SQLITE_API int sqlite3changeset_start_v2_strm( ** object and the buffer is full, discard some data to free up space. 
*/ static void sessionDiscardData(SessionInput *pIn){ - if( pIn->xInput && pIn->iNext>=sessions_strm_chunk_size ){ - int nMove = pIn->buf.nBuf - pIn->iNext; + if( pIn->xInput && pIn->iCurrent>=sessions_strm_chunk_size ){ + int nMove = pIn->buf.nBuf - pIn->iCurrent; assert( nMove>=0 ); if( nMove>0 ){ - memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iNext], nMove); + memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iCurrent], nMove); } - pIn->buf.nBuf -= pIn->iNext; - pIn->iNext = 0; + pIn->buf.nBuf -= pIn->iCurrent; + pIn->iNext -= pIn->iCurrent; + pIn->iCurrent = 0; pIn->nData = pIn->buf.nBuf; } } @@ -230511,8 +231894,8 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; - sessionDiscardData(&p->in); p->in.iCurrent = p->in.iNext; + sessionDiscardData(&p->in); /* If the iterator is already at the end of the changeset, return DONE. */ if( p->in.iNext>=p->in.nData ){ @@ -232871,14 +234254,19 @@ SQLITE_API int sqlite3changegroup_add_change( sqlite3_changegroup *pGrp, sqlite3_changeset_iter *pIter ){ + int rc = SQLITE_OK; + if( pIter->in.iCurrent==pIter->in.iNext || pIter->rc!=SQLITE_OK || pIter->bInvert ){ /* Iterator does not point to any valid entry or is an INVERT iterator. */ - return SQLITE_ERROR; + rc = SQLITE_ERROR; + }else{ + pIter->in.bNoDiscard = 1; + rc = sessionOneChangeToHash(pGrp, pIter, 0); } - return sessionOneChangeToHash(pGrp, pIter, 0); + return rc; } /* @@ -234176,6 +235564,7 @@ SQLITE_EXTENSION_INIT1 /* #include */ /* #include */ +/* #include */ #ifndef SQLITE_AMALGAMATION @@ -234231,6 +235620,18 @@ typedef sqlite3_uint64 u64; # define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) #endif +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + #endif /* Truncate very long tokens to this many bytes. Hard limit is @@ -234303,10 +235704,11 @@ typedef struct Fts5Colset Fts5Colset; */ struct Fts5Colset { int nCol; - int aiCol[1]; + int aiCol[FLEXARRAY]; }; - +/* Size (int bytes) of a complete Fts5Colset object with N columns. */ +#define SZ_FTS5COLSET(N) (sizeof(i64)*((N+2)/2)) /************************************************************************** ** Interface to code in fts5_config.c. fts5_config.c contains contains code @@ -235135,7 +236537,7 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** ** The "lemon" program processes an LALR(1) input grammar file, then uses ** this template to construct a parser. The "lemon" program inserts text -** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** at each "%%" line. Also, any "P-a-r-s-e" identifier prefix (without the ** interstitial "-" characters) contained in this template is changed into ** the value of the %name directive from the grammar. Otherwise, the content ** of this template is copied straight through into the generate parser @@ -237289,7 +238691,7 @@ static int fts5Bm25GetData( ** under consideration. ** ** The problem with this is that if (N < 2*nHit), the IDF is - ** negative. Which is undesirable. So the mimimum allowable IDF is + ** negative. Which is undesirable. So the minimum allowable IDF is ** (1e-6) - roughly the same as a term that appears in just over ** half of set of 5,000,000 documents. 
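The bm25() comment above describes the standard BM25 inverse-document-frequency term, idf = log((N - nHit + 0.5) / (nHit + 0.5)), which goes negative once the term occurs in more than about half of the N rows; the comment says fts5 floors it at 1e-6 instead. A standalone sketch of that computation under those assumptions (bm25idf() is illustrative, not the fts5-internal function, and the placement of the clamp is inferred from the comment; link with -lm):

#include <math.h>
#include <stdio.h>

/* IDF term of BM25 as described in the fts5 comment above, floored at
** 1e-6 so that a term appearing in more than half of all rows cannot
** contribute a negative score. */
static double bm25idf(double nRow, double nHit){
  double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) );
  if( idf<=0.0 ) idf = 1e-6;   /* minimum allowable IDF per the comment */
  return idf;
}

int main(void){
  /* A term in 10 of 5,000,000 rows is highly selective; a term in
  ** 4,000,000 of 5,000,000 rows would otherwise yield a negative IDF. */
  printf("rare term:   %f\n", bm25idf(5000000, 10));
  printf("common term: %f\n", bm25idf(5000000, 4000000));
  return 0;
}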
*/ double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) ); @@ -237752,7 +239154,7 @@ static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){ ** * The 52 upper and lower case ASCII characters, and ** * The 10 integer ASCII characters. ** * The underscore character "_" (0x5F). -** * The unicode "subsitute" character (0x1A). +** * The unicode "substitute" character (0x1A). */ static int sqlite3Fts5IsBareword(char t){ u8 aBareword[128] = { @@ -239070,9 +240472,13 @@ struct Fts5ExprNode { /* Child nodes. For a NOT node, this array always contains 2 entries. For ** AND or OR nodes, it contains 2 or more entries. */ int nChild; /* Number of child nodes */ - Fts5ExprNode *apChild[1]; /* Array of child nodes */ + Fts5ExprNode *apChild[FLEXARRAY]; /* Array of child nodes */ }; +/* Size (in bytes) of an Fts5ExprNode object that holds up to N children */ +#define SZ_FTS5EXPRNODE(N) \ + (offsetof(Fts5ExprNode,apChild) + (N)*sizeof(Fts5ExprNode*)) + #define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING) /* @@ -239103,9 +240509,13 @@ struct Fts5ExprPhrase { Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */ Fts5Buffer poslist; /* Current position list */ int nTerm; /* Number of entries in aTerm[] */ - Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */ + Fts5ExprTerm aTerm[FLEXARRAY]; /* Terms that make up this phrase */ }; +/* Size (in bytes) of an Fts5ExprPhrase object that holds up to N terms */ +#define SZ_FTS5EXPRPHRASE(N) \ + (offsetof(Fts5ExprPhrase,aTerm) + (N)*sizeof(Fts5ExprTerm)) + /* ** One or more phrases that must appear within a certain token distance of ** each other within each matching document. @@ -239114,9 +240524,12 @@ struct Fts5ExprNearset { int nNear; /* NEAR parameter */ Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */ int nPhrase; /* Number of entries in aPhrase[] array */ - Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */ + Fts5ExprPhrase *apPhrase[FLEXARRAY]; /* Array of phrase pointers */ }; +/* Size (in bytes) of an Fts5ExprNearset object covering up to N phrases */ +#define SZ_FTS5EXPRNEARSET(N) \ + (offsetof(Fts5ExprNearset,apPhrase)+(N)*sizeof(Fts5ExprPhrase*)) /* ** Parse context. @@ -239276,7 +240689,7 @@ static int sqlite3Fts5ExprNew( /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ if( sParse.rc==SQLITE_OK && iColnCol ){ - int n = sizeof(Fts5Colset); + int n = SZ_FTS5COLSET(1); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ pColset->nCol = 1; @@ -240634,7 +242047,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( if( pParse->rc==SQLITE_OK ){ if( pNear==0 ){ sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(SZALLOC+1); pRet = sqlite3_malloc64(nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -240645,7 +242058,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( int nNew = pNear->nPhrase + SZALLOC; sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(nNew+1); pRet = (Fts5ExprNearset*)sqlite3_realloc64(pNear, nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -240736,12 +242149,12 @@ static int fts5ParseTokenize( int nNew = SZALLOC + (pPhrase ? 
pPhrase->nTerm : 0); pNew = (Fts5ExprPhrase*)sqlite3_realloc64(pPhrase, - sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew + SZ_FTS5EXPRPHRASE(nNew+1) ); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ - if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase)); + if( pPhrase==0 ) memset(pNew, 0, SZ_FTS5EXPRPHRASE(1)); pCtx->pPhrase = pPhrase = pNew; pNew->nTerm = nNew - SZALLOC; } @@ -240849,7 +242262,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( if( sCtx.pPhrase==0 ){ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, SZ_FTS5EXPRPHRASE(1)); }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } @@ -240884,19 +242297,18 @@ static int sqlite3Fts5ExprClonePhrase( sizeof(Fts5ExprPhrase*)); } if( rc==SQLITE_OK ){ - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNode)); + pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRNODE(1)); } if( rc==SQLITE_OK ){ pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); + SZ_FTS5EXPRNEARSET(2)); } if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; Fts5Colset *pColset; - nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int); + nByte = SZ_FTS5COLSET(pColsetOrig->nCol); pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte); if( pColset ){ memcpy(pColset, pColsetOrig, (size_t)nByte); @@ -240924,7 +242336,7 @@ static int sqlite3Fts5ExprClonePhrase( }else{ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). 
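In the fts5ParseTokenize() hunk above, SZ_FTS5EXPRPHRASE(nNew+1) replaces `sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm)*nNew`. The +1 preserves the byte count: with the old `aTerm[1]` declaration, sizeof() already paid for one slot, so the old expression really bought nNew+1 entries. A standalone sketch of the same chunked realloc growth pattern (names are illustrative; fts5 threads an error code through instead of returning NULL):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Phrase Phrase;
struct Phrase {
  int nTerm;          /* Entries currently used */
  int aTerm[];        /* Flexible tail, grown in chunks */
};
#define SZ_PHRASE(N) (offsetof(Phrase,aTerm) + (size_t)(N)*sizeof(int))
#define CHUNK 8       /* Grow in fixed-size steps, like fts5's SZALLOC */

/* Append one term, reallocating in CHUNK-sized steps so the amortized
** cost per append stays constant. */
static Phrase *phraseAppend(Phrase *p, int iTerm){
  int nUsed = p ? p->nTerm : 0;
  if( p==0 || (nUsed % CHUNK)==0 ){
    Phrase *pNew = realloc(p, SZ_PHRASE(nUsed + CHUNK));
    if( pNew==0 ){ free(p); return 0; }
    if( p==0 ) pNew->nTerm = 0;
    p = pNew;
  }
  p->aTerm[p->nTerm++] = iTerm;
  return p;
}

int main(void){
  Phrase *p = 0;
  for(int i=0; i<20 && (p = phraseAppend(p, i*i))!=0; i++){}
  if( p ){
    printf("nTerm=%d last=%d\n", p->nTerm, p->aTerm[p->nTerm-1]);
    free(p);
  }
  return 0;
}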
*/ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRPHRASE(1)); } } @@ -240989,7 +242401,8 @@ static void sqlite3Fts5ParseSetDistance( ); return; } - nNear = nNear * 10 + (p->p[i] - '0'); + if( nNear<214748363 ) nNear = nNear * 10 + (p->p[i] - '0'); + /* ^^^^^^^^^^^^^^^--- Prevent integer overflow */ } }else{ nNear = FTS5_DEFAULT_NEARDIST; @@ -241018,7 +242431,7 @@ static Fts5Colset *fts5ParseColset( assert( pParse->rc==SQLITE_OK ); assert( iCol>=0 && iColpConfig->nCol ); - pNew = sqlite3_realloc64(p, sizeof(Fts5Colset) + sizeof(int)*nCol); + pNew = sqlite3_realloc64(p, SZ_FTS5COLSET(nCol+1)); if( pNew==0 ){ pParse->rc = SQLITE_NOMEM; }else{ @@ -241053,7 +242466,7 @@ static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p int nCol = pParse->pConfig->nCol; pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc, - sizeof(Fts5Colset) + sizeof(int)*nCol + SZ_FTS5COLSET(nCol+1) ); if( pRet ){ int i; @@ -241114,7 +242527,7 @@ static Fts5Colset *sqlite3Fts5ParseColset( static Fts5Colset *fts5CloneColset(int *pRc, Fts5Colset *pOrig){ Fts5Colset *pRet; if( pOrig ){ - sqlite3_int64 nByte = sizeof(Fts5Colset) + (pOrig->nCol-1) * sizeof(int); + sqlite3_int64 nByte = SZ_FTS5COLSET(pOrig->nCol); pRet = (Fts5Colset*)sqlite3Fts5MallocZero(pRc, nByte); if( pRet ){ memcpy(pRet, pOrig, (size_t)nByte); @@ -241282,7 +242695,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( assert( pNear->nPhrase==1 ); assert( pParse->bPhraseToAnd ); - nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*); + nByte = SZ_FTS5EXPRNODE(nTerm+1); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ pRet->eType = FTS5_AND; @@ -241292,7 +242705,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( pParse->nPhrase--; for(ii=0; iirc, sizeof(Fts5ExprPhrase) + &pParse->rc, SZ_FTS5EXPRPHRASE(1) ); if( pPhrase ){ if( parseGrowPhraseArray(pParse) ){ @@ -241361,7 +242774,7 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( if( pRight->eType==eType ) nChild += pRight->nChild-1; } - nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); + nByte = SZ_FTS5EXPRNODE(nChild); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ @@ -242236,7 +243649,7 @@ static int sqlite3Fts5ExprInstToken( } /* -** Clear the token mappings for all Fts5IndexIter objects mannaged by +** Clear the token mappings for all Fts5IndexIter objects managed by ** the expression passed as the only argument. */ static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ @@ -242271,7 +243684,7 @@ typedef struct Fts5HashEntry Fts5HashEntry; /* ** This file contains the implementation of an in-memory hash table used -** to accumuluate "term -> doclist" content before it is flused to a level-0 +** to accumulate "term -> doclist" content before it is flushed to a level-0 ** segment. 
*/ @@ -242328,7 +243741,7 @@ struct Fts5HashEntry { }; /* -** Eqivalent to: +** Equivalent to: ** ** char *fts5EntryKey(Fts5HashEntry *pEntry){ return zKey; } */ @@ -243264,9 +244677,13 @@ struct Fts5Structure { u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ - Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */ + Fts5StructureLevel aLevel[FLEXARRAY]; /* Array of nLevel level objects */ }; +/* Size (in bytes) of an Fts5Structure object holding up to N levels */ +#define SZ_FTS5STRUCTURE(N) \ + (offsetof(Fts5Structure,aLevel) + (N)*sizeof(Fts5StructureLevel)) + /* ** An object of type Fts5SegWriter is used to write to segments. */ @@ -243396,11 +244813,15 @@ struct Fts5SegIter { ** Array of tombstone pages. Reference counted. */ struct Fts5TombstoneArray { - int nRef; /* Number of pointers to this object */ + int nRef; /* Number of pointers to this object */ int nTombstone; - Fts5Data *apTombstone[1]; /* Array of tombstone pages */ + Fts5Data *apTombstone[FLEXARRAY]; /* Array of tombstone pages */ }; +/* Size (in bytes) of an Fts5TombstoneArray holding up to N tombstones */ +#define SZ_FTS5TOMBSTONEARRAY(N) \ + (offsetof(Fts5TombstoneArray,apTombstone)+(N)*sizeof(Fts5Data*)) + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. @@ -243469,9 +244890,12 @@ struct Fts5Iter { i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */ Fts5CResult *aFirst; /* Current merge state (see above) */ - Fts5SegIter aSeg[1]; /* Array of segment iterators */ + Fts5SegIter aSeg[FLEXARRAY]; /* Array of segment iterators */ }; +/* Size (in bytes) of an Fts5Iter object holding up to N segment iterators */ +#define SZ_FTS5ITER(N) (offsetof(Fts5Iter,aSeg)+(N)*sizeof(Fts5SegIter)) + /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. 
@@ -243498,9 +244922,13 @@ struct Fts5DlidxLvl { struct Fts5DlidxIter { int nLvl; int iSegid; - Fts5DlidxLvl aLvl[1]; + Fts5DlidxLvl aLvl[FLEXARRAY]; }; +/* Size (in bytes) of an Fts5DlidxIter object with up to N levels */ +#define SZ_FTS5DLIDXITER(N) \ + (offsetof(Fts5DlidxIter,aLvl)+(N)*sizeof(Fts5DlidxLvl)) + static void fts5PutU16(u8 *aOut, u16 iVal){ aOut[0] = (iVal>>8); aOut[1] = (iVal&0xFF); @@ -243868,7 +245296,7 @@ static int sqlite3Fts5StructureTest(Fts5Index *p, void *pStruct){ static void fts5StructureMakeWritable(int *pRc, Fts5Structure **pp){ Fts5Structure *p = *pp; if( *pRc==SQLITE_OK && p->nRef>1 ){ - i64 nByte = sizeof(Fts5Structure)+(p->nLevel-1)*sizeof(Fts5StructureLevel); + i64 nByte = SZ_FTS5STRUCTURE(p->nLevel); Fts5Structure *pNew; pNew = (Fts5Structure*)sqlite3Fts5MallocZero(pRc, nByte); if( pNew ){ @@ -243942,10 +245370,7 @@ static int fts5StructureDecode( ){ return FTS5_CORRUPT; } - nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */ - ); + nByte = SZ_FTS5STRUCTURE(nLevel); pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte); if( pRet ){ @@ -244025,10 +245450,7 @@ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; - sqlite3_int64 nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */ - ); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(nLevel+2); pStruct = sqlite3_realloc64(pStruct, nByte); if( pStruct ){ @@ -244567,7 +245989,7 @@ static Fts5DlidxIter *fts5DlidxIterInit( int bDone = 0; for(i=0; p->rc==SQLITE_OK && bDone==0; i++){ - sqlite3_int64 nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl); + sqlite3_int64 nByte = SZ_FTS5DLIDXITER(i+1); Fts5DlidxIter *pNew; pNew = (Fts5DlidxIter*)sqlite3_realloc64(pIter, nByte); @@ -244783,9 +246205,9 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ ** leave an error in the Fts5Index object. 
*/ static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ - const int nTomb = pIter->pSeg->nPgTombstone; + const i64 nTomb = (i64)pIter->pSeg->nPgTombstone; if( nTomb>0 ){ - int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + i64 nByte = SZ_FTS5TOMBSTONEARRAY(nTomb+1); Fts5TombstoneArray *pNew; pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -246246,8 +247668,7 @@ static Fts5Iter *fts5MultiIterAlloc( for(nSlot=2; nSlotaSeg[] */ + SZ_FTS5ITER(nSlot) + /* pNew + pNew->aSeg[] */ sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */ ); if( pNew ){ @@ -248048,7 +249469,7 @@ static void fts5DoSecureDelete( int iDelKeyOff = 0; /* Offset of deleted key, if any */ nIdx = nPg-iPgIdx; - aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + aIdx = sqlite3Fts5MallocZero(&p->rc, ((i64)nIdx)+16); if( p->rc ) return; memcpy(aIdx, &aPg[iPgIdx], nIdx); @@ -248613,7 +250034,7 @@ static Fts5Structure *fts5IndexOptimizeStruct( Fts5Structure *pStruct ){ Fts5Structure *pNew = 0; - sqlite3_int64 nByte = sizeof(Fts5Structure); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(1); int nSeg = pStruct->nSegment; int i; @@ -248642,7 +250063,8 @@ static Fts5Structure *fts5IndexOptimizeStruct( assert( pStruct->aLevel[i].nMerge<=nThis ); } - nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel); + nByte += (((i64)pStruct->nLevel)+1) * sizeof(Fts5StructureLevel); + assert( nByte==SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -249219,9 +250641,13 @@ struct Fts5TokenDataIter { int nIterAlloc; Fts5PoslistReader *aPoslistReader; int *aPoslistToIter; - Fts5Iter *apIter[1]; + Fts5Iter *apIter[FLEXARRAY]; }; +/* Size in bytes of an Fts5TokenDataIter object holding up to N iterators */ +#define SZ_FTS5TOKENDATAITER(N) \ + (offsetof(Fts5TokenDataIter,apIter) + (N)*sizeof(Fts5Iter)) + /* ** The two input arrays - a1[] and a2[] - are in sorted order. This function ** merges the two arrays together and writes the result to output array @@ -249293,7 +250719,7 @@ static void fts5TokendataIterAppendMap( /* ** Sort the contents of the pT->aMap[] array. ** -** The sorting algorithm requries a malloc(). If this fails, an error code +** The sorting algorithm requires a malloc(). If this fails, an error code ** is left in Fts5Index.rc before returning. */ static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ @@ -249484,7 +250910,7 @@ static void fts5SetupPrefixIter( && p->pConfig->bPrefixInsttoken ){ s.pTokendata = &s2; - s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, sizeof(*s2.pT)); + s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, SZ_FTS5TOKENDATAITER(1)); } if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ @@ -249530,7 +250956,8 @@ static void fts5SetupPrefixIter( } } - pData = fts5IdxMalloc(p, sizeof(*pData)+s.doclist.n+FTS5_DATA_ZERO_PADDING); + pData = fts5IdxMalloc(p, sizeof(*pData) + + ((i64)s.doclist.n)+FTS5_DATA_ZERO_PADDING); assert( pData!=0 || p->rc!=SQLITE_OK ); if( pData ){ pData->p = (u8*)&pData[1]; @@ -249611,15 +251038,17 @@ static int sqlite3Fts5IndexRollback(Fts5Index *p){ ** and the initial version of the "averages" record (a zero-byte blob). 
*/ static int sqlite3Fts5IndexReinit(Fts5Index *p){ - Fts5Structure s; + Fts5Structure *pTmp; + u8 tmpSpace[SZ_FTS5STRUCTURE(1)]; fts5StructureInvalidate(p); fts5IndexDiscardData(p); - memset(&s, 0, sizeof(Fts5Structure)); + pTmp = (Fts5Structure*)tmpSpace; + memset(pTmp, 0, SZ_FTS5STRUCTURE(1)); if( p->pConfig->bContentlessDelete ){ - s.nOriginCntr = 1; + pTmp->nOriginCntr = 1; } fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0); - fts5StructureWrite(p, &s); + fts5StructureWrite(p, pTmp); return fts5IndexReturn(p); } @@ -249827,7 +251256,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( if( p->rc==SQLITE_OK ){ if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ int nAlloc = pIn ? pIn->nIterAlloc*2 : 16; - int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter); + int nByte = SZ_FTS5TOKENDATAITER(nAlloc+1); Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); if( pNew==0 ){ @@ -250343,7 +251772,8 @@ static int fts5SetupPrefixIterTokendata( fts5BufferGrow(&p->rc, &token, nToken+1); assert( token.p!=0 || p->rc!=SQLITE_OK ); - ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*ctx.pT)); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); if( p->rc==SQLITE_OK ){ @@ -250474,7 +251904,8 @@ static int sqlite3Fts5IndexIterWriteTokendata( if( pIter->nSeg>0 ){ /* This is a prefix term iterator. */ if( pT==0 ){ - pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*pT)); + pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); pIter->pTokenDataIter = pT; } if( pT ){ @@ -251508,7 +252939,7 @@ static void fts5DecodeRowid( #if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ - int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid compenents */ + int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid components */ fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); if( iSegid==0 ){ @@ -251754,7 +253185,7 @@ static void fts5DecodeFunction( ** buffer overreads even if the record is corrupt. */ n = sqlite3_value_bytes(apVal[1]); aBlob = sqlite3_value_blob(apVal[1]); - nSpace = n + FTS5_DATA_ZERO_PADDING; + nSpace = ((i64)n) + FTS5_DATA_ZERO_PADDING; a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace); if( a==0 ) goto decode_out; if( n>0 ) memcpy(a, aBlob, n); @@ -252469,9 +253900,11 @@ struct Fts5Sorter { i64 iRowid; /* Current rowid */ const u8 *aPoslist; /* Position lists for current row */ int nIdx; /* Number of entries in aIdx[] */ - int aIdx[1]; /* Offsets into aPoslist for current row */ + int aIdx[FLEXARRAY]; /* Offsets into aPoslist for current row */ }; +/* Size (int bytes) of an Fts5Sorter object with N indexes */ +#define SZ_FTS5SORTER(N) (offsetof(Fts5Sorter,nIdx)+((N+2)/2)*sizeof(i64)) /* ** Virtual-table cursor object. 
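Several hunks above (fts5SegIterAllocTombstone, fts5DoSecureDelete, fts5DecodeFunction, fts5IndexOptimizeStruct, fts5VocabInitVtab) make the same defensive change: allocation sizes are computed in i64 rather than int/u32 before reaching the 64-bit allocators, so a large or corrupt count cannot wrap a 32-bit intermediate into a too-small buffer. A minimal standalone illustration of the hazard:

#include <stdint.h>
#include <stdio.h>

int main(void){
  /* With 32-bit arithmetic, count*elemsize+header can wrap and yield a
  ** tiny allocation for a huge element count; promoting to 64 bits
  ** first (as the hunks above do with i64) keeps the product exact so
  ** it can be range-checked or handed to a 64-bit allocator. */
  uint32_t nElem = 0x20000001;                              /* ~536M entries */
  uint32_t bad   = nElem * (uint32_t)sizeof(uint64_t) + 16; /* wraps to 24 */
  uint64_t good  = (uint64_t)nElem * sizeof(uint64_t) + 16; /* exact */
  printf("32-bit size: %u bytes (dangerously small)\n", bad);
  printf("64-bit size: %llu bytes\n", (unsigned long long)good);
  return 0;
}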
@@ -253349,7 +254782,7 @@ static int fts5CursorFirstSorted( const char *zRankArgs = pCsr->zRankArgs; nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr); - nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1); + nByte = SZ_FTS5SORTER(nPhrase); pSorter = (Fts5Sorter*)sqlite3_malloc64(nByte); if( pSorter==0 ) return SQLITE_NOMEM; memset(pSorter, 0, (size_t)nByte); @@ -255875,7 +257308,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3", -1, SQLITE_TRANSIENT); } /* @@ -256100,8 +257533,8 @@ static int fts5Init(sqlite3 *db){ ** its entry point to enable the matchinfo() demo. */ #ifdef SQLITE_FTS5_ENABLE_TEST_MI if( rc==SQLITE_OK ){ - extern int sqlite3Fts5TestRegisterMatchinfo(sqlite3*); - rc = sqlite3Fts5TestRegisterMatchinfo(db); + extern int sqlite3Fts5TestRegisterMatchinfoAPI(fts5_api*); + rc = sqlite3Fts5TestRegisterMatchinfoAPI(&pGlobal->api); } #endif @@ -256690,6 +258123,7 @@ static int fts5StorageDeleteFromIndex( for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ sqlite3_value *pVal = 0; + sqlite3_value *pFree = 0; const char *pText = 0; int nText = 0; const char *pLoc = 0; @@ -256706,11 +258140,22 @@ static int fts5StorageDeleteFromIndex( if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); }else{ - pText = (const char*)sqlite3_value_text(pVal); - nText = sqlite3_value_bytes(pVal); - if( pConfig->bLocale && pSeek ){ - pLoc = (const char*)sqlite3_column_text(pSeek, iCol + pConfig->nCol); - nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + if( sqlite3_value_type(pVal)!=SQLITE_TEXT ){ + /* Make a copy of the value to work with. This is because the call + ** to sqlite3_value_text() below forces the type of the value to + ** SQLITE_TEXT, and we may need to use it again later. */ + pFree = pVal = sqlite3_value_dup(pVal); + if( pVal==0 ){ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol+pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } } } @@ -256726,6 +258171,7 @@ static int fts5StorageDeleteFromIndex( } sqlite3Fts5ClearLocale(pConfig); } + sqlite3_value_free(pFree); } } if( rc==SQLITE_OK && p->nTotalRow<1 ){ @@ -259939,7 +261385,6 @@ static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){ aAscii[0] = 0; /* 0x00 is never a token character */ } - /* ** 2015 May 30 ** @@ -260480,12 +261925,12 @@ static int fts5VocabInitVtab( *pzErr = sqlite3_mprintf("wrong number of vtable arguments"); rc = SQLITE_ERROR; }else{ - int nByte; /* Bytes of space to allocate */ + i64 nByte; /* Bytes of space to allocate */ const char *zDb = bDb ? argv[3] : argv[1]; const char *zTab = bDb ? argv[4] : argv[3]; const char *zType = bDb ? 
argv[5] : argv[4]; - int nDb = (int)strlen(zDb)+1; - int nTab = (int)strlen(zTab)+1; + i64 nDb = strlen(zDb)+1; + i64 nTab = strlen(zTab)+1; int eType = 0; rc = fts5VocabTableType(zType, pzErr, &eType); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index 5e07ce68e..c34235d84 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -134,7 +134,7 @@ extern "C" { ** ** Since [version 3.6.18] ([dateof:3.6.18]), ** SQLite source code has been stored in the -** Fossil configuration management +** Fossil configuration management ** system. ^The SQLITE_SOURCE_ID macro evaluates to ** a string which identifies a particular check-in of SQLite ** within its configuration management system. ^The SQLITE_SOURCE_ID @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.49.1" -#define SQLITE_VERSION_NUMBER 3049001 -#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" +#define SQLITE_VERSION "3.50.4" +#define SQLITE_VERSION_NUMBER 3050004 +#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1164,6 +1164,12 @@ struct sqlite3_io_methods { ** the value that M is to be set to. Before returning, the 32-bit signed ** integer is overwritten with the previous value of M. ** +**
        • [[SQLITE_FCNTL_BLOCK_ON_CONNECT]] +** The [SQLITE_FCNTL_BLOCK_ON_CONNECT] opcode is used to configure the +** VFS to block when taking a SHARED lock to connect to a wal mode database. +** This is used to implement the functionality associated with +** SQLITE_SETLK_BLOCK_ON_CONNECT. +** **
        • [[SQLITE_FCNTL_DATA_VERSION]] ** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to ** a database file. The argument is a pointer to a 32-bit unsigned integer. @@ -1260,6 +1266,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_CKSM_FILE 41 #define SQLITE_FCNTL_RESET_CACHE 42 #define SQLITE_FCNTL_NULL_IO 43 +#define SQLITE_FCNTL_BLOCK_ON_CONNECT 44 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1990,13 +1997,16 @@ struct sqlite3_mem_methods { ** ** [[SQLITE_CONFIG_LOOKASIDE]]
          SQLITE_CONFIG_LOOKASIDE
          **
          ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine -** the default size of lookaside memory on each [database connection]. +** the default size of [lookaside memory] on each [database connection]. ** The first argument is the -** size of each lookaside buffer slot and the second is the number of -** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE -** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] -** option to [sqlite3_db_config()] can be used to change the lookaside -** configuration on individual connections.)^
          +** size of each lookaside buffer slot ("sz") and the second is the number of +** slots allocated to each database connection ("cnt").)^ +** ^(SQLITE_CONFIG_LOOKASIDE sets the default lookaside size. +** The [SQLITE_DBCONFIG_LOOKASIDE] option to [sqlite3_db_config()] can +** be used to change the lookaside configuration on individual connections.)^ +** The [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to change the +** default lookaside configuration at compile-time. +**
** ** [[SQLITE_CONFIG_PCACHE2]]
          SQLITE_CONFIG_PCACHE2
          **
          ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is @@ -2233,31 +2243,50 @@ struct sqlite3_mem_methods { ** [[SQLITE_DBCONFIG_LOOKASIDE]] **
          SQLITE_DBCONFIG_LOOKASIDE
          **
          The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the -** configuration of the lookaside memory allocator within a database +** configuration of the [lookaside memory allocator] within a database ** connection. ** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not ** in the [DBCONFIG arguments|usual format]. ** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two, ** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE ** should have a total of five parameters. -** ^The first argument (the third parameter to [sqlite3_db_config()] is a +**
            +**
          1. The first argument ("buf") is a ** pointer to a memory buffer to use for lookaside memory. -** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb -** may be NULL in which case SQLite will allocate the -** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the -** size of each lookaside buffer slot. ^The third argument is the number of -** slots. The size of the buffer in the first argument must be greater than -** or equal to the product of the second and third arguments. The buffer -** must be aligned to an 8-byte boundary. ^If the second argument to -** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally -** rounded down to the next smaller multiple of 8. ^(The lookaside memory +** The first argument may be NULL in which case SQLite will allocate the +** lookaside buffer itself using [sqlite3_malloc()]. +**

          2. The second argument ("sz") is the +** size of each lookaside buffer slot. Lookaside is disabled if "sz" +** is less than 8. The "sz" argument should be a multiple of 8 less than +** 65536. If "sz" does not meet this constraint, it is reduced in size until +** it does. +**

          3. The third argument ("cnt") is the number of slots. Lookaside is disabled +** if "cnt"is less than 1. The "cnt" value will be reduced, if necessary, so +** that the product of "sz" and "cnt" does not exceed 2,147,418,112. The "cnt" +** parameter is usually chosen so that the product of "sz" and "cnt" is less +** than 1,000,000. +**

          +**

          If the "buf" argument is not NULL, then it must +** point to a memory buffer with a size that is greater than +** or equal to the product of "sz" and "cnt". +** The buffer must be aligned to an 8-byte boundary. +** The lookaside memory ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words -** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. +** when the value returned by [SQLITE_DBSTATUS_LOOKASIDE_USED] is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns -** [SQLITE_BUSY].)^

          +** [SQLITE_BUSY]. +** If the "buf" argument is NULL and an attempt +** to allocate memory based on "sz" and "cnt" fails, then +** lookaside is silently disabled. +**

          +** The [SQLITE_CONFIG_LOOKASIDE] configuration option can be used to set the +** default lookaside configuration at initialization. The +** [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to set the default lookaside +** configuration at compile-time. Typical values for lookaside are 1200 for +** "sz" and 40 to 100 for "cnt". +** ** ** [[SQLITE_DBCONFIG_ENABLE_FKEY]] **

          SQLITE_DBCONFIG_ENABLE_FKEY
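The SQLITE_DBCONFIG_LOOKASIDE rules above translate into a five-parameter sqlite3_db_config() call. A minimal sketch that lets SQLite allocate the pool itself (NULL "buf"), using sz/cnt values in line with the "typical values" guidance; it must run while lookaside is unused, e.g. right after open, and error handling is abbreviated:

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Five parameters total: db, verb, then buf, sz, cnt.  A NULL buf
  ** asks SQLite to malloc the lookaside pool itself; sz is a multiple
  ** of 8 below 65536, cnt the number of slots. */
  int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 1200, 64);
  printf("lookaside config: %s\n", rc==SQLITE_OK ? "ok" : sqlite3_errstr(rc));
  sqlite3_close(db);
  return 0;
}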
          @@ -2994,6 +3023,44 @@ SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); */ SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); +/* +** CAPI3REF: Set the Setlk Timeout +** METHOD: sqlite3 +** +** This routine is only useful in SQLITE_ENABLE_SETLK_TIMEOUT builds. If +** the VFS supports blocking locks, it sets the timeout in ms used by +** eligible locks taken on wal mode databases by the specified database +** handle. In non-SQLITE_ENABLE_SETLK_TIMEOUT builds, or if the VFS does +** not support blocking locks, this function is a no-op. +** +** Passing 0 to this function disables blocking locks altogether. Passing +** -1 to this function requests that the VFS blocks for a long time - +** indefinitely if possible. The results of passing any other negative value +** are undefined. +** +** Internally, each SQLite database handle store two timeout values - the +** busy-timeout (used for rollback mode databases, or if the VFS does not +** support blocking locks) and the setlk-timeout (used for blocking locks +** on wal-mode databases). The sqlite3_busy_timeout() method sets both +** values, this function sets only the setlk-timeout value. Therefore, +** to configure separate busy-timeout and setlk-timeout values for a single +** database handle, call sqlite3_busy_timeout() followed by this function. +** +** Whenever the number of connections to a wal mode database falls from +** 1 to 0, the last connection takes an exclusive lock on the database, +** then checkpoints and deletes the wal file. While it is doing this, any +** new connection that tries to read from the database fails with an +** SQLITE_BUSY error. Or, if the SQLITE_SETLK_BLOCK_ON_CONNECT flag is +** passed to this API, the new connection blocks until the exclusive lock +** has been released. +*/ +SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags); + +/* +** CAPI3REF: Flags for sqlite3_setlk_timeout() +*/ +#define SQLITE_SETLK_BLOCK_ON_CONNECT 0x01 + /* ** CAPI3REF: Convenience Routines For Running Queries ** METHOD: sqlite3 @@ -4013,7 +4080,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of ** database filename D with corresponding journal file J and WAL file W and -** with N URI parameters key/values pairs in the array P. The result from +** an array P of N URI Key/Value pairs. The result from ** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that ** is safe to pass to routines like: **
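A minimal sketch of the new sqlite3_setlk_timeout() documented above, guarded so it still compiles against pre-3.50 headers. The call is best-effort: per the comment it is a no-op unless SQLite was built with SQLITE_ENABLE_SETLK_TIMEOUT and the VFS supports blocking locks, and sqlite3_busy_timeout() remains the fallback for rollback-mode databases:

#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_busy_timeout(db, 5000);   /* rollback-mode and non-blocking VFS paths */
#if SQLITE_VERSION_NUMBER >= 3050000
  /* wal-mode blocking locks: wait up to 2s, and block while a closing
  ** connection checkpoints and deletes the wal file instead of failing
  ** immediately with SQLITE_BUSY. */
  sqlite3_setlk_timeout(db, 2000, SQLITE_SETLK_BLOCK_ON_CONNECT);
#endif
  printf("running SQLite %s\n", sqlite3_libversion());
  sqlite3_close(db);
  return 0;
}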
            @@ -4694,7 +4761,7 @@ typedef struct sqlite3_context sqlite3_context; ** METHOD: sqlite3_stmt ** ** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following +** literals may be replaced by a [parameter] that matches one of the following ** templates: ** **
              @@ -4739,7 +4806,7 @@ typedef struct sqlite3_context sqlite3_context; ** ** [[byte-order determination rules]] ^The byte-order of ** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF) -** found in first character, which is removed, or in the absence of a BOM +** found in the first character, which is removed, or in the absence of a BOM ** the byte order is the native byte order of the host ** machine for sqlite3_bind_text16() or the byte order specified in ** the 6th parameter for sqlite3_bind_text64().)^ @@ -4759,7 +4826,7 @@ typedef struct sqlite3_context sqlite3_context; ** or sqlite3_bind_text16() or sqlite3_bind_text64() then ** that parameter must be the byte offset ** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occurs at byte offsets less than +** terminated. If any NUL characters occur at byte offsets less than ** the value of the fourth parameter then the resulting string value will ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. @@ -4971,7 +5038,7 @@ SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); ** METHOD: sqlite3_stmt ** ** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in +** table column that is the origin of a particular result column in a ** [SELECT] statement. ** ^The name of the database or table or column can be returned as ** either a UTF-8 or UTF-16 string. ^The _database_ routines return @@ -5109,7 +5176,7 @@ SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); ** other than [SQLITE_ROW] before any subsequent invocation of ** sqlite3_step(). Failure to reset the prepared statement using ** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1], +** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]), ** sqlite3_step() began ** calling [sqlite3_reset()] automatically in this circumstance rather ** than returning [SQLITE_MISUSE]. This is not considered a compatibility @@ -5540,8 +5607,8 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be -** used inside of triggers, view, CHECK constraints, or other elements of -** the database schema. This flags is especially recommended for SQL +** used inside of triggers, views, CHECK constraints, or other elements of +** the database schema. This flag is especially recommended for SQL ** functions that have side effects or reveal internal application state. ** Without this flag, an attacker might be able to modify the schema of ** a database file to include invocations of the function with parameters @@ -5572,7 +5639,7 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** [user-defined window functions|available here]. ** ** ^(If the final parameter to sqlite3_create_function_v2() or -** sqlite3_create_window_function() is not NULL, then it is destructor for +** sqlite3_create_window_function() is not NULL, then it is the destructor for ** the application data pointer. 
The destructor is invoked when the function ** is deleted, either by being overloaded or when the database connection ** closes.)^ ^The destructor is also invoked if the call to @@ -5972,7 +6039,7 @@ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); ** METHOD: sqlite3_value ** ** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] -** object D and returns a pointer to that copy. ^The [sqlite3_value] returned +** object V and returns a pointer to that copy. ^The [sqlite3_value] returned ** is a [protected sqlite3_value] object even if the input is not. ** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a ** memory allocation fails. ^If V is a [pointer value], then the result @@ -6010,7 +6077,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. Changing the +** determined by the N parameter on the first successful call. Changing the ** value of N in any subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory ** allocation.)^ Within the xFinal callback, it is customary to set @@ -6172,7 +6239,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** ** Security Warning: These interfaces should not be exposed in scripting ** languages or in other circumstances where it might be possible for an -** an attacker to invoke them. Any agent that can invoke these interfaces +** attacker to invoke them. Any agent that can invoke these interfaces ** can probably also take control of the process. ** ** Database connection client data is only available for SQLite @@ -6286,7 +6353,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur +** appear if the string were NUL terminated. If any NUL characters occur ** in the string at a byte offset that is less than the value of the 3rd ** parameter, then the resulting string will contain embedded NULs and the ** result of expressions operating on strings with embedded NULs is undefined. @@ -6344,7 +6411,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** string and preferably a string literal. The sqlite3_result_pointer() ** routine is part of the [pointer passing interface] added for SQLite 3.20.0. ** -** If these routines are called from within the different thread +** If these routines are called from within a different thread ** than the one containing the application-defined function that received ** the [sqlite3_context] pointer, the results are undefined. */ @@ -6750,7 +6817,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name -** for the N-th database on database connection D, or a NULL pointer of N is +** for the N-th database on database connection D, or a NULL pointer if N is ** out of range. An N value of 0 means the main database file. An N of 1 is ** the "temp" schema. Larger values of N correspond to various ATTACH-ed ** databases. @@ -6845,7 +6912,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
              The SQLITE_TXN_READ state means that the database is currently ** in a read transaction. Content has been read from the database file ** but nothing in the database file has changed. The transaction state -** will advanced to SQLITE_TXN_WRITE if any changes occur and there are +** will be advanced to SQLITE_TXN_WRITE if any changes occur and there are ** no other conflicting concurrent write transactions. The transaction ** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or ** [COMMIT].
              @@ -6854,7 +6921,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
              The SQLITE_TXN_WRITE state means that the database is currently ** in a write transaction. Content has been written to the database file ** but has not yet committed. The transaction state will change to -** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
              +** SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT]. */ #define SQLITE_TXN_NONE 0 #define SQLITE_TXN_READ 1 @@ -7005,6 +7072,8 @@ SQLITE_API int sqlite3_autovacuum_pages( ** ** ^The second argument is a pointer to the function to invoke when a ** row is updated, inserted or deleted in a rowid table. +** ^The update hook is disabled by invoking sqlite3_update_hook() +** with a NULL pointer as the second parameter. ** ^The first argument to the callback is a copy of the third argument ** to sqlite3_update_hook(). ** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], @@ -7133,7 +7202,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** CAPI3REF: Impose A Limit On Heap Size ** ** These interfaces impose limits on the amount of heap memory that will be -** by all database connections within a single process. +** used by all database connections within a single process. ** ** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the ** soft limit on the amount of heap memory that may be allocated by SQLite. @@ -7191,7 +7260,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); **
            )^ ** ** The circumstances under which SQLite will enforce the heap limits may -** changes in future releases of SQLite. +** change in future releases of SQLite. */ SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N); @@ -7306,8 +7375,8 @@ SQLITE_API int sqlite3_table_column_metadata( ** ^The entry point is zProc. ** ^(zProc may be 0, in which case SQLite will try to come up with an ** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic +** If that does not work, it constructs a name "sqlite3_X_init" where +** X consists of the lower-case equivalent of all ASCII alphabetic ** characters in the filename from the last "/" to the first following ** "." and omitting any initial "lib".)^ ** ^The sqlite3_load_extension() interface returns @@ -7378,7 +7447,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three ** arguments and expects an integer result as if the signature of the -** entry point where as follows: +** entry point were as follows: ** **
             **    int xEntryPoint(
            @@ -7542,7 +7611,7 @@ struct sqlite3_module {
             ** virtual table and might not be checked again by the byte code.)^ ^(The
             ** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
             ** is left in its default setting of false, the constraint will always be
            -** checked separately in byte code.  If the omit flag is change to true, then
            +** checked separately in byte code.  If the omit flag is changed to true, then
             ** the constraint may or may not be checked in byte code.  In other words,
             ** when the omit flag is true there is no guarantee that the constraint will
             ** not be checked again using byte code.)^
            @@ -7568,7 +7637,7 @@ struct sqlite3_module {
             ** The xBestIndex method may optionally populate the idxFlags field with a
             ** mask of SQLITE_INDEX_SCAN_* flags. One such flag is
             ** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN]
            -** output to show the idxNum has hex instead of as decimal.  Another flag is
            +** output to show the idxNum as hex instead of as decimal.  Another flag is
             ** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will
             ** return at most one row.
             **
            @@ -7709,7 +7778,7 @@ struct sqlite3_index_info {
             ** the implementation of the [virtual table module].   ^The fourth
             ** parameter is an arbitrary client data pointer that is passed through
             ** into the [xCreate] and [xConnect] methods of the virtual table module
            -** when a new virtual table is be being created or reinitialized.
            +** when a new virtual table is being created or reinitialized.
             **
             ** ^The sqlite3_create_module_v2() interface has a fifth parameter which
             ** is a pointer to a destructor for the pClientData.  ^SQLite will
            @@ -7874,7 +7943,7 @@ typedef struct sqlite3_blob sqlite3_blob;
             ** in *ppBlob. Otherwise an [error code] is returned and, unless the error
             ** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
             ** the API is not misused, it is always safe to call [sqlite3_blob_close()]
            -** on *ppBlob after this function it returns.
            +** on *ppBlob after this function returns.
             **
             ** This function fails with SQLITE_ERROR if any of the following are true:
             ** 
              @@ -7994,7 +8063,7 @@ SQLITE_API int sqlite3_blob_close(sqlite3_blob *); ** ** ^Returns the size in bytes of the BLOB accessible via the ** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing +** incremental blob I/O routines can only read or overwrite existing ** blob content; they cannot change the size of a blob. ** ** This routine only works on a [BLOB handle] which has been created @@ -8144,7 +8213,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ^The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() ** routine returns NULL if it is unable to allocate the requested -** mutex. The argument to sqlite3_mutex_alloc() must one of these +** mutex. The argument to sqlite3_mutex_alloc() must be one of these ** integer constants: ** **
                @@ -8377,7 +8446,7 @@ SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); ** CAPI3REF: Retrieve the mutex for a database connection ** METHOD: sqlite3 ** -** ^This interface returns a pointer the [sqlite3_mutex] object that +** ^This interface returns a pointer to the [sqlite3_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. ** ^If the [threading mode] is Single-thread or Multi-thread then this @@ -8500,7 +8569,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** CAPI3REF: SQL Keyword Checking ** ** These routines provide access to the set of SQL language keywords -** recognized by SQLite. Applications can uses these routines to determine +** recognized by SQLite. Applications can use these routines to determine ** whether or not a specific identifier needs to be escaped (for example, ** by enclosing in double-quotes) so as not to confuse the parser. ** @@ -8668,7 +8737,7 @@ SQLITE_API void sqlite3_str_reset(sqlite3_str*); ** content of the dynamic string under construction in X. The value ** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X ** and might be freed or altered by any subsequent method on the same -** [sqlite3_str] object. Applications must not used the pointer returned +** [sqlite3_str] object. Applications must not use the pointer returned by ** [sqlite3_str_value(X)] after any subsequent method call on the same ** object. ^Applications may change the content of the string returned ** by [sqlite3_str_value(X)] as long as they do not write into any bytes @@ -8754,7 +8823,7 @@ SQLITE_API int sqlite3_status64( ** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] ** buffer and where forced to overflow to [sqlite3_malloc()]. The ** returned value includes allocations that overflowed because they -** where too large (they were larger than the "sz" parameter to +** were too large (they were larger than the "sz" parameter to ** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because ** no space was left in the page cache.)^ ** @@ -8838,28 +8907,29 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(
                SQLITE_DBSTATUS_LOOKASIDE_HIT
                **
                This parameter returns the number of malloc attempts that were ** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
                )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] ** ^(
                SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
                -**
                This parameter returns the number malloc attempts that might have +**
                This parameter returns the number of malloc attempts that might have ** been satisfied using lookaside memory but failed due to the amount of ** memory requested being larger than the lookaside slot size. ** Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
                )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] ** ^(
                SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
                -**
                This parameter returns the number malloc attempts that might have +**
                This parameter returns the number of malloc attempts that might have ** been satisfied using lookaside memory but failed due to all lookaside ** memory already being in use. ** Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
                )^ ** ** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
                SQLITE_DBSTATUS_CACHE_USED
                **
                This parameter returns the approximate number of bytes of heap ** memory used by all pager caches associated with the database connection.)^ ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. +**
                ** ** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]] ** ^(
                SQLITE_DBSTATUS_CACHE_USED_SHARED
                @@ -8868,10 +8938,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** memory used by that pager cache is divided evenly between the attached ** connections.)^ In other words, if none of the pager caches associated ** with the database connection are shared, this request returns the same -** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are +** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are ** shared, the value returned by this call will be smaller than that returned ** by DBSTATUS_CACHE_USED. ^The highwater mark associated with -** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. +** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. ** ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
                SQLITE_DBSTATUS_SCHEMA_USED
                **
                This parameter returns the approximate number of bytes of heap @@ -8881,6 +8951,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** schema memory is shared with other database connections due to ** [shared cache mode] being enabled. ** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. +**
                ** ** [[SQLITE_DBSTATUS_STMT_USED]] ^(
                SQLITE_DBSTATUS_STMT_USED
                **
                This parameter returns the approximate number of bytes of heap @@ -8917,7 +8988,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** been written to disk in the middle of a transaction due to the page ** cache overflowing. Transactions are more efficient if they are written ** to disk all at once. When pages spill mid-transaction, that introduces -** additional overhead. This parameter can be used help identify +** additional overhead. This parameter can be used to help identify ** inefficiencies that can be resolved by increasing the cache size. **
                ** @@ -8988,13 +9059,13 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** [[SQLITE_STMTSTATUS_SORT]]
                SQLITE_STMTSTATUS_SORT
                **
                ^This is the number of sort operations that have occurred. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance through careful use of indices.
                +** improve performance through careful use of indices. ** ** [[SQLITE_STMTSTATUS_AUTOINDEX]]
                SQLITE_STMTSTATUS_AUTOINDEX
                **
                ^This is the number of rows inserted into transient indices that ** were created automatically in order to help joins run faster. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance by adding permanent indices that do not +** improve performance by adding permanent indices that do not ** need to be reinitialized each time the statement is run.
                ** ** [[SQLITE_STMTSTATUS_VM_STEP]]
                SQLITE_STMTSTATUS_VM_STEP
                @@ -9003,19 +9074,19 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** to 2147483647. The number of virtual machine operations can be ** used as a proxy for the total work done by the prepared statement. ** If the number of virtual machine operations exceeds 2147483647 -** then the value returned by this statement status code is undefined. +** then the value returned by this statement status code is undefined. ** ** [[SQLITE_STMTSTATUS_REPREPARE]]
                SQLITE_STMTSTATUS_REPREPARE
                **
                ^This is the number of times that the prepare statement has been ** automatically regenerated due to schema changes or changes to -** [bound parameters] that might affect the query plan. +** [bound parameters] that might affect the query plan.
                ** ** [[SQLITE_STMTSTATUS_RUN]]
                SQLITE_STMTSTATUS_RUN
                **
                ^This is the number of times that the prepared statement has ** been run. A single "run" for the purposes of this counter is one ** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()]. ** The counter is incremented on the first [sqlite3_step()] call of each -** cycle. +** cycle.
                ** ** [[SQLITE_STMTSTATUS_FILTER_MISS]] ** [[SQLITE_STMTSTATUS_FILTER HIT]] @@ -9025,7 +9096,7 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** step was bypassed because a Bloom filter returned not-found. The ** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of ** times that the Bloom filter returned a find, and thus the join step -** had to be processed as normal. +** had to be processed as normal. ** ** [[SQLITE_STMTSTATUS_MEMUSED]]
                SQLITE_STMTSTATUS_MEMUSED
                **
                ^This is the approximate number of bytes of heap memory @@ -9130,9 +9201,9 @@ struct sqlite3_pcache_page { ** SQLite will typically create one cache instance for each open database file, ** though this is not guaranteed. ^The ** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always a power of two. ^The +** be allocated by the cache. ^szPage will always be a power of two. ^The ** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will +** associated with each page cache entry. ^The szExtra parameter will be ** a number less than 250. SQLite will use the ** extra szExtra bytes on each page to store metadata about the underlying ** database page on disk. The value passed into szExtra depends @@ -9140,17 +9211,17 @@ struct sqlite3_pcache_page { ** ^The third argument to xCreate(), bPurgeable, is true if the cache being ** created will be used to cache database pages of a file stored on disk, or ** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based with the value of bPurgeable; +** does not have to do anything special based upon the value of bPurgeable; ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will ** never invoke xUnpin() except to deliberately delete a page. ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to ** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will +** ^Hence, a cache created with bPurgeable set to false will ** never contain any unpinned pages. ** ** [[the xCachesize() page cache method]] ** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (number of pages stored by) the cache +** suggested maximum cache-size (number of pages stored) for the cache ** instance passed as the first argument. This is the value configured using ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable ** parameter, the implementation is not required to do anything with this @@ -9177,12 +9248,12 @@ struct sqlite3_pcache_page { ** implementation must return a pointer to the page buffer with its content ** intact. If the requested page is not already in the cache, then the ** cache implementation should use the value of the createFlag -** parameter to help it determined what action to take: +** parameter to help it determine what action to take: ** ** **
                createFlag Behavior when page is not already in cache **
                0 Do not allocate a new page. Return NULL. -**
                1 Allocate a new page if it easy and convenient to do so. +**
                1 Allocate a new page if it is easy and convenient to do so. ** Otherwise return NULL. **
                2 Make every effort to allocate a new page. Only return ** NULL if allocating a new page is effectively impossible. @@ -9199,7 +9270,7 @@ struct sqlite3_pcache_page { ** as its second argument. If the third parameter, discard, is non-zero, ** then the page must be evicted from the cache. ** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of +** zero, then the page may be discarded or retained at the discretion of the ** page cache implementation. ^The page cache implementation ** may choose to evict unpinned pages at any time. ** @@ -9217,7 +9288,7 @@ struct sqlite3_pcache_page { ** When SQLite calls the xTruncate() method, the cache must discard all ** existing cache entries with page numbers (keys) greater than or equal ** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that +** of these pages are pinned, they become implicitly unpinned, meaning that ** they can be safely discarded. ** ** [[the xDestroy() page cache method]] @@ -9397,7 +9468,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** external process or via a database connection other than the one being ** used by the backup operation, then the backup will be automatically ** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified by the using the same database connection as is used +** database is modified by using the same database connection as is used ** by the backup operation, then the backup database is automatically ** updated at the same time. ** @@ -9414,7 +9485,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** and may not be used following a call to sqlite3_backup_finish(). ** ** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless or whether or not +** sqlite3_backup_step() errors occurred, regardless of whether or not ** sqlite3_backup_step() completed. ** ^If an out-of-memory condition or IO error occurred during any prior ** sqlite3_backup_step() call on the same [sqlite3_backup] object, then @@ -9516,7 +9587,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** application receives an SQLITE_LOCKED error, it may call the ** sqlite3_unlock_notify() method with the blocked connection handle as ** the first argument to register for a callback that will be invoked -** when the blocking connections current transaction is concluded. ^The +** when the blocking connection's current transaction is concluded. ^The ** callback is invoked from within the [sqlite3_step] or [sqlite3_close] ** call that concludes the blocking connection's transaction. ** @@ -9536,7 +9607,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** blocked connection already has a registered unlock-notify callback, ** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is ** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connections +** unlock-notify callback is canceled. ^The blocked connection's ** unlock-notify callback may also be canceled by closing the blocked ** connection using [sqlite3_close()]. ** @@ -9934,7 +10005,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** support constraints. 
In this configuration (which is the default) if ** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire ** statement is rolled back as if [ON CONFLICT | OR ABORT] had been -** specified as part of the users SQL statement, regardless of the actual +** specified as part of the user's SQL statement, regardless of the actual ** ON CONFLICT mode specified. ** ** If X is non-zero, then the virtual table implementation guarantees @@ -9968,7 +10039,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
                SQLITE_VTAB_INNOCUOUS
                **
                Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a @@ -10136,7 +10207,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
                ** ** ^For the purposes of comparing virtual table output values to see if the -** values are same value for sorting purposes, two NULL values are considered +** values are the same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" ** (or "IS NOT DISTINCT FROM") and not "==". ** @@ -10146,7 +10217,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); ** ** ^A virtual table implementation is always free to return rows in any order ** it wants, as long as the "orderByConsumed" flag is not set. ^When the -** the "orderByConsumed" flag is unset, the query planner will add extra +** "orderByConsumed" flag is unset, the query planner will add extra ** [bytecode] to ensure that the final results returned by the SQL query are ** ordered correctly. The use of the "orderByConsumed" flag and the ** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful @@ -10243,7 +10314,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); ** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint -** processing use the [sqlite3_vtab_in()] interface in the +** processing using the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint ** processing, then these routines return [SQLITE_ERROR].)^ @@ -10298,7 +10369,7 @@ SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut); ** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V) ** inteface returns SQLITE_NOTFOUND if the right-hand side of the J-th ** constraint is not available. ^The sqlite3_vtab_rhs_value() interface -** can return an result code other than SQLITE_OK or SQLITE_NOTFOUND if +** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if ** something goes wrong. ** ** The sqlite3_vtab_rhs_value() interface is usually only successful if @@ -10326,8 +10397,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** KEYWORDS: {conflict resolution mode} ** ** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. +** inform a [virtual table] implementation of the [ON CONFLICT] mode +** for the SQL statement being evaluated. ** ** Note that the [SQLITE_IGNORE] constant is also used as a potential ** return value from the [sqlite3_set_authorizer()] callback and that @@ -10367,39 +10438,39 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** [[SQLITE_SCANSTAT_EST]]
                SQLITE_SCANSTAT_EST
                **
                ^The "double" variable pointed to by the V parameter will be set to the ** query planner's estimate for the average number of rows output from each -** iteration of the X-th loop. If the query planner's estimates was accurate, +** iteration of the X-th loop. If the query planner's estimate was accurate, ** then this value will approximate the quotient NVISIT/NLOOP and the ** product of this value for all prior loops with the same SELECTID will -** be the NLOOP value for the current loop. +** be the NLOOP value for the current loop.
                ** ** [[SQLITE_SCANSTAT_NAME]]
                SQLITE_SCANSTAT_NAME
                **
                ^The "const char *" variable pointed to by the V parameter will be set ** to a zero-terminated UTF-8 string containing the name of the index or table -** used for the X-th loop. +** used for the X-th loop.
                ** ** [[SQLITE_SCANSTAT_EXPLAIN]]
                SQLITE_SCANSTAT_EXPLAIN
                **
                ^The "const char *" variable pointed to by the V parameter will be set ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] -** description for the X-th loop. +** description for the X-th loop.
                ** ** [[SQLITE_SCANSTAT_SELECTID]]
                SQLITE_SCANSTAT_SELECTID
                **
                ^The "int" variable pointed to by the V parameter will be set to the ** id for the X-th query plan element. The id value is unique within the ** statement. The select-id is the same value as is output in the first -** column of an [EXPLAIN QUERY PLAN] query. +** column of an [EXPLAIN QUERY PLAN] query.
                ** ** [[SQLITE_SCANSTAT_PARENTID]]
                SQLITE_SCANSTAT_PARENTID
                **
                The "int" variable pointed to by the V parameter will be set to the -** the id of the parent of the current query element, if applicable, or +** id of the parent of the current query element, if applicable, or ** to zero if the query element has no parent. This is the same value as -** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** returned in the second column of an [EXPLAIN QUERY PLAN] query.
                ** ** [[SQLITE_SCANSTAT_NCYCLE]]
                SQLITE_SCANSTAT_NCYCLE
                **
                The sqlite3_int64 output value is set to the number of cycles, ** according to the processor time-stamp counter, that elapsed while the ** query element was being processed. This value is not available for ** all query elements - if it is unavailable the output variable is -** set to -1. +** set to -1.
                ** */ #define SQLITE_SCANSTAT_NLOOP 0 @@ -10440,8 +10511,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. ** ** Parameter "idx" identifies the specific query element to retrieve statistics -** for. Query elements are numbered starting from zero. A value of -1 may be -** to query for statistics regarding the entire query. ^If idx is out of range +** for. Query elements are numbered starting from zero. A value of -1 may +** retrieve statistics for the entire query. ^If idx is out of range ** - less than -1 or greater than or equal to the total number of query ** elements used to implement the statement - a non-zero value is returned and ** the variable that pOut points to is unchanged. @@ -10484,7 +10555,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^If a write-transaction is open on [database connection] D when the -** [sqlite3_db_cacheflush(D)] interface invoked, any dirty +** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty ** pages in the pager-cache that are not currently in use are written out ** to disk. A dirty page may be in use if a database cursor created by an ** active SQL statement is reading from it, or if it is page 1 of a database @@ -10598,8 +10669,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** triggers; and so forth. ** ** When the [sqlite3_blob_write()] API is used to update a blob column, -** the pre-update hook is invoked with SQLITE_DELETE. This is because the -** in this case the new values are not available. In this case, when a +** the pre-update hook is invoked with SQLITE_DELETE, because +** the new values are not yet available. In this case, when a ** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the @@ -10852,7 +10923,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** For an ordinary on-disk database file, the serialization is just a ** copy of the disk file. For an in-memory database or a "TEMP" database, ** the serialization is the same sequence of bytes which would be written -** to disk if that database where backed up to disk. +** to disk if that database were backed up to disk. ** ** The usual case is that sqlite3_serialize() copies the serialization of ** the database into memory obtained from [sqlite3_malloc64()] and returns @@ -10861,7 +10932,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations ** are made, and the sqlite3_serialize() function will return a pointer ** to the contiguous memory representation of the database that SQLite -** is currently using for that database, or NULL if the no such contiguous +** is currently using for that database, or NULL if no such contiguous ** memory representation of the database exists. A contiguous memory ** representation of the database will usually only exist if there has ** been a prior call to [sqlite3_deserialize(D,S,...)] with the same @@ -10932,7 +11003,7 @@ SQLITE_API unsigned char *sqlite3_serialize( ** database is currently in a read transaction or is involved in a backup ** operation. ** -** It is not possible to deserialized into the TEMP database. If the +** It is not possible to deserialize into the TEMP database. 
If the ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** @@ -10954,7 +11025,7 @@ SQLITE_API int sqlite3_deserialize( sqlite3 *db, /* The database connection */ const char *zSchema, /* Which DB to reopen with the deserialization */ unsigned char *pData, /* The serialized database content */ - sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szDb, /* Number of bytes in the deserialization */ sqlite3_int64 szBuf, /* Total size of buffer pData[] */ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ ); @@ -10962,7 +11033,7 @@ SQLITE_API int sqlite3_deserialize( /* ** CAPI3REF: Flags for sqlite3_deserialize() ** -** The following are allowed values for 6th argument (the F argument) to +** The following are allowed values for the 6th argument (the F argument) to ** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. ** ** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization @@ -11487,9 +11558,10 @@ SQLITE_API void sqlite3session_table_filter( ** is inserted while a session object is enabled, then later deleted while ** the same session object is disabled, no INSERT record will appear in the ** changeset, even though the delete took place while the session was disabled. -** Or, if one field of a row is updated while a session is disabled, and -** another field of the same row is updated while the session is enabled, the -** resulting changeset will contain an UPDATE change that updates both fields. +** Or, if one field of a row is updated while a session is enabled, and +** then another field of the same row is updated while the session is disabled, +** the resulting changeset will contain an UPDATE change that updates both +** fields. */ SQLITE_API int sqlite3session_changeset( sqlite3_session *pSession, /* Session object */ @@ -11561,8 +11633,9 @@ SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession ** database zFrom the contents of the two compatible tables would be ** identical. ** -** It an error if database zFrom does not exist or does not contain the -** required compatible table. +** Unless the call to this function is a no-op as described above, it is an +** error if database zFrom does not exist or does not contain the required +** compatible table. ** ** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite ** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg @@ -11697,7 +11770,7 @@ SQLITE_API int sqlite3changeset_start_v2( ** The following flags may passed via the 4th parameter to ** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]: ** -**
                SQLITE_CHANGESETAPPLY_INVERT
                +**
                SQLITE_CHANGESETSTART_INVERT
                ** Invert the changeset while iterating through it. This is equivalent to ** inverting a changeset using sqlite3changeset_invert() before applying it. ** It is an error to specify this flag with a patchset. @@ -12012,19 +12085,6 @@ SQLITE_API int sqlite3changeset_concat( void **ppOut /* OUT: Buffer containing output changeset */ ); - -/* -** CAPI3REF: Upgrade the Schema of a Changeset/Patchset -*/ -SQLITE_API int sqlite3changeset_upgrade( - sqlite3 *db, - const char *zDb, - int nIn, const void *pIn, /* Input changeset */ - int *pnOut, void **ppOut /* OUT: Inverse of input */ -); - - - /* ** CAPI3REF: Changegroup Handle ** diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index 76d840164..5a4927665 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -16,53 +16,10 @@ package sqlite3 #else #include #endif -#include - -static int -_sqlite3_user_authenticate(sqlite3* db, const char* zUsername, const char* aPW, int nPW) -{ - return sqlite3_user_authenticate(db, zUsername, aPW, nPW); -} - -static int -_sqlite3_user_add(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_add(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_change(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_change(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_delete(sqlite3* db, const char* zUsername) -{ - return sqlite3_user_delete(db, zUsername); -} - -static int -_sqlite3_auth_enabled(sqlite3* db) -{ - int exists = -1; - - sqlite3_stmt *stmt; - sqlite3_prepare_v2(db, "select count(type) from sqlite_master WHERE type='table' and name='sqlite_user';", -1, &stmt, NULL); - - while ( sqlite3_step(stmt) == SQLITE_ROW) { - exists = sqlite3_column_int(stmt, 0); - } - - sqlite3_finalize(stmt); - - return exists; -} */ import "C" import ( "errors" - "unsafe" ) const ( @@ -70,8 +27,9 @@ const ( ) var ( - ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") - ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") + ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + errUserAuthNoLongerSupported = errors.New("sqlite3: the sqlite_userauth tag is no longer supported as the userauth extension is no longer supported by the SQLite authors, see https://github.com/mattn/go-sqlite3/issues/1341") ) // Authenticate will perform an authentication of the provided username @@ -88,15 +46,7 @@ var ( // If the SQLITE_USER table is not present in the database file, then // this interface is a harmless no-op returning SQLITE_OK. func (c *SQLiteConn) Authenticate(username, password string) error { - rv := c.authenticate(username, password) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrUnauthorized - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authenticate provides the actual authentication to SQLite. 
@@ -109,17 +59,7 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_authenticate(c.db, cuser, cpass, C.int(len(password)))) + return 1 } // AuthUserAdd can be used (by an admin user only) @@ -131,20 +71,7 @@ func (c *SQLiteConn) authenticate(username, password string) int { // for any ATTACH-ed databases. Any call to AuthUserAdd by a // non-admin user results in an error. func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserAdd(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserAdd enables the User Authentication if not enabled. @@ -162,17 +89,7 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_add(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserChange can be used to change a users @@ -181,20 +98,7 @@ func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // credentials or admin privilege setting. No user may change their own // admin privilege setting. func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserChange(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserChange allows to modify a user. @@ -215,17 +119,7 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_change(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserDelete can be used (by an admin user only) @@ -234,15 +128,7 @@ func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // the database cannot be converted into a no-authentication-required // database. func (c *SQLiteConn) AuthUserDelete(username string) error { - rv := c.authUserDelete(username) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserDelete can be used to delete a user. 
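With the userauth extension removed, every exported method in this file is now a stub returning the same sentinel error, so callers that used to branch on ErrUnauthorized or ErrAdminRequired will always hit the error path. A short sketch of what a caller observes after this bump (the in-memory DSN and credentials are illustrative):

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3" // importing the package registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	conn, err := db.Conn(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Drop down to the driver connection to reach the userauth methods.
	err = conn.Raw(func(driverConn any) error {
		sc, ok := driverConn.(*sqlite3.SQLiteConn)
		if !ok {
			return fmt.Errorf("unexpected driver connection type %T", driverConn)
		}
		// Now a stub: always reports false.
		fmt.Println("auth enabled:", sc.AuthEnabled())
		// Now a stub: always returns errUserAuthNoLongerSupported.
		return sc.Authenticate("admin", "secret")
	})
	fmt.Println("authenticate:", err) // sqlite3: the sqlite_userauth tag is no longer supported ...
}
```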
@@ -258,25 +144,12 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { - // Allocate C Variables - cuser := C.CString(username) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - }() - - return int(C._sqlite3_user_delete(c.db, cuser)) + return 1 } // AuthEnabled checks if the database is protected by user authentication func (c *SQLiteConn) AuthEnabled() (exists bool) { - rv := c.authEnabled() - if rv == 1 { - exists = true - } - - return + return false } // authEnabled perform the actual check for user authentication. @@ -289,7 +162,7 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // 0 - Disabled // 1 - Enabled func (c *SQLiteConn) authEnabled() int { - return int(C._sqlite3_auth_enabled(c.db)) + return 0 } // EOF diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index 935437bb6..3a5e0a4ed 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -371,6 +371,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -704,6 +706,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go index 293d9363b..52b2a67f3 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go @@ -7,6 +7,7 @@ import ( "io" "os" "path/filepath" + "slices" "sort" "strings" @@ -20,6 +21,7 @@ import ( type MermaidWriter struct { MinEdgeName string SpecifiedPackageName string + DrawV0Semantics bool } type MermaidOption func(*MermaidWriter) @@ -32,6 +34,7 @@ func NewMermaidWriter(opts ...MermaidOption) *MermaidWriter { m := &MermaidWriter{ MinEdgeName: minEdgeName, SpecifiedPackageName: specifiedPackageName, + DrawV0Semantics: true, } for _, opt := range opts { @@ -52,6 +55,12 @@ func WithSpecifiedPackageName(specifiedPackageName string) MermaidOption { } } +func WithV0Semantics(drawV0Semantics bool) MermaidOption { + return func(o *MermaidWriter) { + o.DrawV0Semantics = drawV0Semantics + } +} + // writes out the channel edges of the declarative config graph in a mermaid format capable of being pasted into // mermaid renderers like github, mermaid.live, etc. 
// output is sorted lexicographically by package name, and then by channel name @@ -124,7 +133,10 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } var deprecatedPackage string - deprecatedChannels := []string{} + deprecatedChannelIDs := []string{} + decoratedBundleIDs := map[string][]string{"deprecated": {}, "skipped": {}, "deprecatedskipped": {}} + linkID := 0 + skippedLinkIDs := []string{} for _, c := range cfg.Channels { filteredChannel := writer.filterChannel(&c, versionMap, minVersion, minEdgePackage) @@ -137,58 +149,102 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } channelID := fmt.Sprintf("%s-%s", filteredChannel.Package, filteredChannel.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %%%% channel %q\n", filteredChannel.Name)) - pkgBuilder.WriteString(fmt.Sprintf(" subgraph %s[%q]\n", channelID, filteredChannel.Name)) + fmt.Fprintf(pkgBuilder, " %%%% channel %q\n", filteredChannel.Name) + fmt.Fprintf(pkgBuilder, " subgraph %s[%q]\n", channelID, filteredChannel.Name) if depByPackage.Has(filteredChannel.Package) { deprecatedPackage = filteredChannel.Package } if depByChannel.Has(filteredChannel.Name) { - deprecatedChannels = append(deprecatedChannels, channelID) + deprecatedChannelIDs = append(deprecatedChannelIDs, channelID) } - for _, ce := range filteredChannel.Entries { - if versionMap[ce.Name].GE(minVersion) { - bundleDeprecation := "" - if depByBundle.Has(ce.Name) { - bundleDeprecation = ":::deprecated" + // sort edges by decreasing version + sortedEntries := make([]*ChannelEntry, 0, len(filteredChannel.Entries)) + for i := range filteredChannel.Entries { + sortedEntries = append(sortedEntries, &filteredChannel.Entries[i]) + } + sort.Slice(sortedEntries, func(i, j int) bool { + // Sort by decreasing version: greater version comes first + return versionMap[sortedEntries[i].Name].GT(versionMap[sortedEntries[j].Name]) + }) + + skippedEntities := sets.Set[string]{} + + const ( + captureNewEntry = true + processExisting = false + ) + handleSemantics := func(edge string, linkID int, captureNew bool) { + if writer.DrawV0Semantics { + if captureNew { + if skippedEntities.Has(edge) { + skippedLinkIDs = append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } else { + skippedEntities.Insert(edge) + } + } else { + if skippedEntities.Has(edge) { + skippedLinkIDs = append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } } + } + } - entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryID, ce.Name, bundleDeprecation)) + for _, ce := range sortedEntries { + entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]\n", entryID, ce.Name) + + // mermaid allows specification of only a single decoration class, so any combinations must be independently represented + switch { + case depByBundle.Has(ce.Name) && skippedEntities.Has(ce.Name): + decoratedBundleIDs["deprecatedskipped"] = append(decoratedBundleIDs["deprecatedskipped"], entryID) + case depByBundle.Has(ce.Name): + decoratedBundleIDs["deprecated"] = append(decoratedBundleIDs["deprecated"], entryID) + case skippedEntities.Has(ce.Name): + decoratedBundleIDs["skipped"] = append(decoratedBundleIDs["skipped"], entryID) + } - if len(ce.Replaces) > 0 { - replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name)) - } - if len(ce.Skips) > 0 { - for _, s := range ce.Skips { - skipsID 
:= fmt.Sprintf("%s-%s", channelID, s) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name)) - } + if len(ce.Skips) > 0 { + for _, s := range ce.Skips { + skipsID := fmt.Sprintf("%s-%s", channelID, s) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name) + handleSemantics(s, linkID, captureNewEntry) + linkID++ } - if len(ce.SkipRange) > 0 { - skipRange, err := semver.ParseRange(ce.SkipRange) - if err == nil { - for _, edgeName := range filteredChannel.Entries { - if skipRange(versionMap[edgeName.Name]) { - skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name)) - } + } + if len(ce.SkipRange) > 0 { + skipRange, err := semver.ParseRange(ce.SkipRange) + if err == nil { + for _, edgeName := range filteredChannel.Entries { + if skipRange(versionMap[edgeName.Name]) { + skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ } - } else { - fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } + } + } else { + fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } } + // have to process replaces last, because applicability can be impacted by skips + if len(ce.Replaces) > 0 { + replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ + } } - pkgBuilder.WriteString(" end\n") + fmt.Fprintf(pkgBuilder, " end\n") } } _, _ = out.Write([]byte("graph LR\n")) _, _ = out.Write([]byte(" classDef deprecated fill:#E8960F\n")) + _, _ = out.Write([]byte(" classDef skipped stroke:#FF0000,stroke-width:4px\n")) + _, _ = out.Write([]byte(" classDef deprecatedskipped fill:#E8960F,stroke:#FF0000,stroke-width:4px\n")) pkgNames := []string{} for pname := range pkgs { pkgNames = append(pkgNames, pname) @@ -197,22 +253,35 @@ return pkgNames[i] < pkgNames[j] }) for _, pkgName := range pkgNames { - _, _ = out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) - _, _ = out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) + _, _ = fmt.Fprintf(out, " %%%% package %q\n", pkgName) + _, _ = fmt.Fprintf(out, " subgraph %q\n", pkgName) _, _ = out.Write([]byte(pkgs[pkgName].String())) _, _ = out.Write([]byte(" end\n")) } if deprecatedPackage != "" { - _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) + _, _ = fmt.Fprintf(out, "style %s fill:#989695\n", deprecatedPackage) + } + + if len(deprecatedChannelIDs) > 0 { + for _, deprecatedChannel := range deprecatedChannelIDs { + _, _ = fmt.Fprintf(out, "style %s fill:#DCD0FF\n", deprecatedChannel) + } } - if len(deprecatedChannels) > 0 { - for _, deprecatedChannel := range deprecatedChannels { - _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) + // express the decoration classes + for key := range decoratedBundleIDs { + if len(decoratedBundleIDs[key]) > 0 { + b := 
slices.Clone(decoratedBundleIDs[key]) + slices.Sort(b) + _, _ = fmt.Fprintf(out, "class %s %s\n", strings.Join(b, ","), key) } } + if len(skippedLinkIDs) > 0 { + _, _ = fmt.Fprintf(out, "linkStyle %s %s\n", strings.Join(skippedLinkIDs, ","), "stroke:#FF0000,stroke-width:3px,stroke-dasharray:5;") + } + return nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c..2331b8b4f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef92..d273b6640 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f29..5fe8d3b4d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 1448439b7..7b762370e 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format { return FmtUnknown } -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. +// NewDecoder returns a new decoder based on the given input format. Metric +// names are validated based on the provided Format -- if the format requires +// escaping, traditional Prometheus validity checking is used. Otherwise, names +// are checked for UTF-8 validity. Supported formats include delimited protobuf +// and Prometheus text format. For historical reasons, this decoder falls back +// to classic text decoding for any other format. 
This decoder does not fully +// support OpenMetrics although it may often succeed due to the similarities +// between the formats. This decoder may not support the latest features of +// Prometheus text format and is not intended for high-performance applications. +// See: https://github.com/prometheus/common/issues/812 func NewDecoder(r io.Reader, format Format) Decoder { + scheme := model.LegacyValidation + if format.ToEscapingScheme() == model.NoEscaping { + scheme = model.UTF8Validation + } switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r)} + return &protoDecoder{r: bufio.NewReader(r), s: scheme} + case TypeProtoText, TypeProtoCompact: + return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} } - return &textDecoder{r: r} + return &textDecoder{r: r, s: scheme} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { r protodelim.Reader + s model.ValidationScheme } // Decode implements the Decoder interface. @@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + if !d.s.IsValidMetricName(v.GetName()) { return fmt.Errorf("invalid metric name %q", v.GetName()) } for _, m := range v.GetMetric() { @@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { if !model.LabelValue(l.GetValue()).IsValid() { return fmt.Errorf("invalid label value %q", l.GetValue()) } - if !model.LabelName(l.GetName()).IsValid() { + if !d.s.IsValidLabelName(l.GetName()) { return fmt.Errorf("invalid label name %q", l.GetName()) } } @@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { return nil } +// errDecoder is an error-state decoder that always returns the same error. +type errDecoder struct { + err error +} + +func (d *errDecoder) Decode(*dto.MetricFamily) error { + return d.err +} + // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader fams map[string]*dto.MetricFamily + s model.ValidationScheme err error } @@ -126,7 +151,7 @@ type textDecoder struct { func (d *textDecoder) Decode(v *dto.MetricFamily) error { if d.err == nil { // Read all metrics in one shot. - var p TextParser + p := NewTextParser(d.s) d.fams, d.err = p.TextToMetricFamilies(d.r) // If we don't get an error, store io.EOF for the end. if d.err == nil { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index d7f3d76f5..73c24dfbc 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,14 +18,12 @@ import ( "io" "net/http" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" "github.com/prometheus/common/model" - - "github.com/munnerz/goautoneg" - - dto "github.com/prometheus/client_model/go" ) // Encoder types encode metric families into an underlying wire protocol. @@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error { // appropriate accepted type is found, FmtText is returned (which is the // Prometheus text format). This function will never negotiate FmtOpenMetrics, // as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. 
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560..c34c7de43 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4..0290f6abc 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1..8dbf6d04e 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b3..c4e9c1bbc 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 4067978a1..8f2edde32 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,6 +363,12 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. 
Don't add 'quantile' and 'le' @@ -353,13 +381,12 @@ func (p *TextParser) startLabelName() stateFn { labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index de83afe93..93498c9ec 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -16,9 +16,10 @@ package model import ( "encoding/json" "fmt" - "regexp" "strings" "unicode/utf8" + + "github.com/grafana/regexp" ) const ( @@ -32,6 +33,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,34 +107,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. 
func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da3..9de47b256 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index a6b01755b..f7f61c5d5 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,16 +14,18 @@ package model import ( + "encoding/json" "errors" "fmt" - "regexp" "sort" "strconv" "strings" "unicode/utf8" + "github.com/grafana/regexp" dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "gopkg.in/yaml.v2" ) var ( @@ -62,16 +64,151 @@ var ( type ValidationScheme int const ( + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -101,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -175,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -310,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -327,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -345,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -409,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 8f91a9702..8c51dbaac 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -17,8 +17,9 @@ import ( "encoding/json" "errors" "fmt" - "regexp" "time" + + "github.com/grafana/regexp" ) // Matcher describes a matches the value of a given label. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index fed9e87b9..1730b0fdc 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d8..a9995a37e 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. 
func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. @@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e8..91ce5b7a4 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee6..078910f46 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: return "" case ValScalar: diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c..6acf8ab1e 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 71757151c..8416275f4 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +<div align="center"> +<img alt="cobra-logo" src="…"> +</div> + Cobra is a library for creating powerful modern CLI applications. +Visit Cobra.dev for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) +<div align="center"> + +Supported by: + +<a href="https://www.warp.dev/cobra"> <img alt="Warp sponsorship" src="…"> </a> + +### [Warp, the AI terminal for devs](https://www.warp.dev/cobra) +[Try Cobra in Warp today](https://www.warp.dev/cobra) + +</div> +
                # Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 000000000..54e60c28c --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! + +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index dbb2c298b..78088db69 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -39,7 +39,7 @@ const ( ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // Flow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages).
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f763..d3607c2d2 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index d4dfbc5ea..eeed1e92b 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -137,12 +137,16 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// DEPRECATED: please use ParseErrorsAllowlist instead +// This type will be removed in a future release +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -158,8 +162,12 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // DEPRECATED: please use ParseErrorsAllowlist instead + // This field will be removed in a future release + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -928,7 +936,6 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -986,6 +993,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... // we do not want to lose arg in this case if len(split) >= 2 { @@ -1044,6 +1053,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' 
// we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1158,12 +1169,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true + f.args = make([]string, 0, len(arguments)) + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1174,7 +1185,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if errors.Is(err, ErrHelp) { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1200,6 +1214,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if errors.Is(err, ErrHelp) { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index f563907e2..e62eab538 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,6 +8,7 @@ import ( goflag "flag" "reflect" "strings" + "time" ) // go test flags prefixes @@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + // ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(), // since by default those are skipped by pflag.Parse(). 
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` @@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { } return goFlagSet.Parse(skippedFlags) } + diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc..1d1e3bf91 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go index dc024807e..3dee42479 100644 --- a/vendor/github.com/spf13/pflag/time.go +++ b/vendor/github.com/spf13/pflag/time.go @@ -48,7 +48,13 @@ func (d *timeValue) Type() string { return "time" } -func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) } +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} // GetTime return the time value of a flag with the given name func (f *FlagSet) GetTime(name string) (time.Time, error) { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba09..ffb24e8e3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) 
} // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 190634165..c592f6ad5 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". 
// // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) 
or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087b..58db92845 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
 //
 //	a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
 //	a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1d2f71824..2fdf80fdd 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR
 		compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
 		if !isComparable {
-			return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+			return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
 		}
 
 		if !containsValue(allowedComparesResults, compareResult) {
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 4e91332bb..de8de0cb6 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -210,59 +210,78 @@ the problem actually occurred in calling code.*/
 // of each stack frame leading from the current test to the assert call that
 // failed.
 func CallerInfo() []string {
 	var pc uintptr
-	var ok bool
 	var file string
 	var line int
 	var name string
 
+	const stackFrameBufferSize = 10
+	pcs := make([]uintptr, stackFrameBufferSize)
+
 	callers := []string{}
-	for i := 0; ; i++ {
-		pc, file, line, ok = runtime.Caller(i)
-		if !ok {
-			// The breaks below failed to terminate the loop, and we ran off the
-			// end of the call stack.
-			break
-		}
+	offset := 1
 
-		// This is a huge edge case, but it will panic if this is the case, see #180
-		if file == "" {
-			break
-		}
+	for {
+		n := runtime.Callers(offset, pcs)
 
-		f := runtime.FuncForPC(pc)
-		if f == nil {
-			break
-		}
-		name = f.Name()
-
-		// testing.tRunner is the standard library function that calls
-		// tests. Subtests are called directly by tRunner, without going through
-		// the Test/Benchmark/Example function that contains the t.Run calls, so
-		// with subtests we should break when we hit tRunner, without adding it
-		// to the list of callers.
- if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. 
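The IsNotType helpers introduced above are the negation of IsType: the assertion passes when the two arguments have different concrete types. The Subset doc changes in this diff also tighten the map semantics (map elements are key-value pairs, unless the subset is compared with an array or slice, where only the map keys count). A minimal usage sketch against this vendored testify version; MyStruct and NotMyStruct are illustrative placeholders, not part of the diff:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type MyStruct struct{}
type NotMyStruct struct{}

func TestTypeAssertions(t *testing.T) {
	obj := &MyStruct{}

	// Passes: obj has the concrete type *MyStruct.
	assert.IsType(t, &MyStruct{}, obj)

	// Passes: *NotMyStruct and *MyStruct are different concrete types.
	assert.IsNotType(t, &NotMyStruct{}, obj)
}

func TestSubsetWithMapSubset(t *testing.T) {
	// Map vs map: elements are compared as key-value pairs.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

	// Slice vs map subset: only the map keys are checked against the list.
	assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})
}
```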
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d3..a0b953aa5 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7c..5a6bb75f2 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7..5a74c4f4d 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64..0bae80e34 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe6..8041803fd 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index eb5682df9..efc89deff 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -208,9 +208,16 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } -// Unset removes a mock handler from being called. +// Unset removes all mock handlers that satisfy the call instance arguments from being +// called. Only supported on call instances with static input arguments. // -// test.On("func", mock.Anything).Unset() +// For example, the only handler remaining after the following would be "MyMethod(2, 2)": +// +// Mock. +// On("MyMethod", 2, 2).Return(0). +// On("MyMethod", 3, 3).Return(0). +// On("MyMethod", Anything, Anything).Return(0) +// Mock.On("MyMethod", 3, 3).Unset() func (c *Call) Unset() *Call { var unlockOnce sync.Once @@ -331,7 +338,10 @@ func (m *Mock) TestData() objx.Map { Setting expectations */ -// Test sets the test struct variable of the mock object +// Test sets the [TestingT] on which errors will be reported, otherwise errors +// will cause a panic. +// Test should not be called on an object that is going to be used in a +// goroutine other than the one running the test function. func (m *Mock) Test(t TestingT) { m.mutex.Lock() defer m.mutex.Unlock() @@ -494,7 +504,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() - m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(%#v).Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } // we have to fail here - because we don't know what to do // as the return arguments. This is because: @@ -514,7 +524,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen assert.CallerInfo(), ) } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(%#v).Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } } @@ -661,7 +671,7 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls actualCalls++ } } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) of method %s does not match the actual number of calls (%d).", expectedCalls, methodName, actualCalls)) } // AssertCalled asserts that the method was called. diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 968434724..c8e3f94a8 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950d..2d02f9bce 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f..e6f7e9446 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) 
} +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 //	a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
 //	a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+//	a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
 	Samef(a.t, expected, actual, msg, args...)
 }
 
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 //	a.Subset([1, 2, 3], [1, 2])
 //	a.Subset({"x": 1, "y": 2}, {"x": 1})
+//	a.Subset([1, 2, 3], {1: "one", 2: "two"})
+//	a.Subset({"x": 1, "y": 2}, ["x"])
 func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
 	Subset(a.t, list, subset, msgAndArgs...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 //	a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
 //	a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
index c466ffeda..8f9650c13 100644
--- a/vendor/github.com/ulikunitz/xz/TODO.md
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -1,9 +1,5 @@
 # TODO list
 
-## Release v0.5.x
-
-1. Support check flag in gxz command.
-
 ## Release v0.6
 
 1. Review encoder and check for lzma improvements under xz.
@@ -86,6 +82,19 @@
 
 ## Log
 
+### 2025-08-28
+
+Release v0.5.14 addresses the security vulnerability CVE-2025-58058. If you put
+bytes in front of an LZMA stream, the header might not be read correctly and
+memory for the dictionary buffer might be allocated needlessly. I have
+implemented mitigations for the problem.
+
+### 2025-08-20
+
+Release v0.5.13 addressed issue #61 regarding the handling of multiple
+WriteClosers together, so I added a new package xio with a WriteCloserStack to
+handle that case.
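The CVE-2025-58058 mitigation described in the v0.5.14 entry surfaces in the lzma reader changes later in this diff: ReaderConfig.DictCap becomes an upper bound on the dictionary size a header may request. A sketch of a consumer opting into a tighter cap, assuming this vendored version (the file name is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	f, err := os.Open("data.lzma") // illustrative input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Cap the dictionary at 64 MiB instead of the 2 GiB default, so a
	// mangled or malicious header cannot force a huge allocation.
	r, err := lzma.ReaderConfig{DictCap: 64 << 20}.NewReader(f)
	if err != nil {
		// Oversized headers are rejected with an *lzma.ErrDictSize.
		fmt.Println("rejected:", err)
		return
	}
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
}
```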
+
 ### 2024-04-03
 
 Release v0.5.12 updates README.md and SECURITY.md to address the supply chain
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go
index 1ae7d80ca..34aa097e1 100644
--- a/vendor/github.com/ulikunitz/xz/lzma/header.go
+++ b/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -60,36 +60,36 @@ const noHeaderSize uint64 = 1<<64 - 1
 // HeaderLen provides the length of the LZMA file header.
 const HeaderLen = 13
 
-// header represents the header of an LZMA file.
-type header struct {
-	properties Properties
-	dictCap    int
-	// uncompressed size; negative value if no size is given
-	size int64
+// Header represents the header of an LZMA file.
+type Header struct {
+	Properties Properties
+	DictSize   uint32
+	// uncompressed size; negative value if no size is given
+	Size int64
 }
 
 // marshalBinary marshals the header.
-func (h *header) marshalBinary() (data []byte, err error) {
-	if err = h.properties.verify(); err != nil {
+func (h *Header) marshalBinary() (data []byte, err error) {
+	if err = h.Properties.verify(); err != nil {
 		return nil, err
 	}
-	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
+	if !(h.DictSize <= MaxDictCap) {
 		return nil, fmt.Errorf("lzma: DictCap %d out of range",
-			h.dictCap)
+			h.DictSize)
 	}
 
 	data = make([]byte, 13)
 
 	// property byte
-	data[0] = h.properties.Code()
+	data[0] = h.Properties.Code()
 
 	// dictionary capacity
-	putUint32LE(data[1:5], uint32(h.dictCap))
+	putUint32LE(data[1:5], uint32(h.DictSize))
 
 	// uncompressed size
 	var s uint64
-	if h.size > 0 {
-		s = uint64(h.size)
+	if h.Size > 0 {
+		s = uint64(h.Size)
 	} else {
 		s = noHeaderSize
 	}
@@ -99,20 +99,20 @@ func (h *header) marshalBinary() (data []byte, err error) {
 }
 
 // unmarshalBinary unmarshals the header.
-func (h *header) unmarshalBinary(data []byte) error {
+func (h *Header) unmarshalBinary(data []byte) error {
 	if len(data) != HeaderLen {
 		return errors.New("lzma.unmarshalBinary: data has wrong length")
 	}
 
 	// properties
 	var err error
-	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+	if h.Properties, err = PropertiesForCode(data[0]); err != nil {
 		return err
 	}
 
 	// dictionary capacity
-	h.dictCap = int(uint32LE(data[1:]))
-	if h.dictCap < 0 {
+	h.DictSize = uint32LE(data[1:])
+	if int(h.DictSize) < 0 {
 		return errors.New(
 			"LZMA header: dictionary capacity exceeds maximum " +
 				"integer")
@@ -121,10 +121,10 @@ func (h *header) unmarshalBinary(data []byte) error {
 	// uncompressed size
 	s := uint64LE(data[5:])
 	if s == noHeaderSize {
-		h.size = -1
+		h.Size = -1
 	} else {
-		h.size = int64(s)
-		if h.size < 0 {
+		h.Size = int64(s)
+		if h.Size < 0 {
 			return errors.New(
 				"LZMA header: uncompressed size " +
 					"out of int64 range")
@@ -134,9 +134,9 @@ func (h *header) unmarshalBinary(data []byte) error {
 	return nil
 }
 
-// validDictCap checks whether the dictionary capacity is correct. This
+// validDictSize checks whether the dictionary capacity is correct. This
 // is used to weed out wrong file headers.
-func validDictCap(dictcap int) bool {
+func validDictSize(dictcap int) bool {
 	if int64(dictcap) == MaxDictCap {
 		return true
 	}
@@ -155,13 +155,16 @@ func validDictCap(dictcap int) bool {
 // dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
 // there is an explicit size it must not exceed 256 GiB. The length of
 // the data argument must be HeaderLen.
+//
+// This function should not be relied upon, because there is no guarantee
+// that actual LZMA files follow these constraints.
func ValidHeader(data []byte) bool {
- var h header
+ var h Header
if err := h.unmarshalBinary(data); err != nil {
return false
}
- if !validDictCap(h.dictCap) {
+ if !validDictSize(int(h.DictSize)) {
return false
}
- return h.size < 0 || h.size <= 1<<38
+ return h.Size < 0 || h.Size <= 1<<38
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go
index ae911c389..b656c2618 100644
--- a/vendor/github.com/ulikunitz/xz/lzma/reader.go
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go
@@ -6,25 +6,32 @@
// Reader and Writer support the classic LZMA format. Reader2 and
// Writer2 support the decoding and encoding of LZMA2 streams.
//
-// The package is written completely in Go and doesn't rely on any external
+// The package is written completely in Go and does not rely on any external
// library.
package lzma
import (
"errors"
+ "fmt"
"io"
)
// ReaderConfig stores the parameters for the reader of the classic LZMA
// format.
type ReaderConfig struct {
+ // Since v0.5.14 this parameter sets an upper limit for a .lzma file's
+ // dictionary size. This helps to mitigate problems with mangled
+ // headers.
DictCap int
}
// fill converts the zero values of the configuration to the default values.
func (c *ReaderConfig) fill() {
if c.DictCap == 0 {
- c.DictCap = 8 * 1024 * 1024
+ // set an upper limit of 2 GB for the dictionary capacity to address
+ // the zero-prefix security issue.
+ c.DictCap = 1 << 31
+ // original: c.DictCap = 8 * 1024 * 1024
}
}
@@ -39,10 +46,33 @@ func (c *ReaderConfig) Verify() error {
}
// Reader provides a reader for LZMA files or streams.
+//
+// # Security concerns
+//
+// Note that the LZMA format does not include a magic marker in the header, so
+// [NewReader] cannot determine whether it is reading the actual header. For
+// instance, the LZMA stream might have a zero byte prepended to it, leading to
+// larger dictionary and file sizes. The code will detect later that there are
+// problems with the stream, but by then the dictionary has already been
+// allocated, which might consume a lot of memory.
+//
+// Version 0.5.14 introduces built-in mitigations:
+//
+// - The [ReaderConfig] DictCap field is now interpreted as a limit for the
+// dictionary size.
+// - The default is 2 Gigabytes (2^31 bytes).
+// - Users can check with the [Reader.Header] method what the actual values are in
+// their LZMA files and set a smaller limit using [ReaderConfig].
+// - The dictionary size doesn't exceed the larger of the file size and
+// the minimum dictionary size. This is another measure to prevent huge
+// memory allocations for the dictionary.
+// - The code supports stream sizes only up to a pebibyte (1024^5).
type Reader struct {
- lzma io.Reader
- h header
- d *decoder
+ lzma io.Reader
+ header Header
+ // headerOrig stores the original header read from the stream.
+ headerOrig Header
+ d *decoder
}
// NewReader creates a new reader for an LZMA stream using the classic
@@ -51,8 +81,37 @@ func NewReader(lzma io.Reader) (r *Reader, err error) {
return ReaderConfig{}.NewReader(lzma)
}
+// ErrDictSize reports an error concerning the dictionary size.
+type ErrDictSize struct {
+ ConfigDictCap int
+ HeaderDictSize uint32
+ Message string
+}
+
+// Error returns the error message.
+func (e *ErrDictSize) Error() string { + return e.Message +} + +func newErrDictSize(messageformat string, + configDictCap int, headerDictSize uint32, + args ...interface{}) *ErrDictSize { + newArgs := make([]interface{}, len(args)+2) + newArgs[0] = configDictCap + newArgs[1] = headerDictSize + copy(newArgs[2:], args) + return &ErrDictSize{ + ConfigDictCap: configDictCap, + HeaderDictSize: headerDictSize, + Message: fmt.Sprintf(messageformat, newArgs...), + } +} + +// We support only files not larger than 1 << 50 bytes (a pebibyte, 1024^5). +const maxStreamSize = 1 << 50 + // NewReader creates a new reader for an LZMA stream in the classic -// format. The function reads and verifies the the header of the LZMA +// format. The function reads and verifies the header of the LZMA // stream. func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { if err = c.Verify(); err != nil { @@ -66,29 +125,63 @@ func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { return nil, err } r = &Reader{lzma: lzma} - if err = r.h.unmarshalBinary(data); err != nil { + if err = r.header.unmarshalBinary(data); err != nil { return nil, err } - if r.h.dictCap < MinDictCap { - r.h.dictCap = MinDictCap + r.headerOrig = r.header + dictSize := int64(r.header.DictSize) + if int64(c.DictCap) < dictSize { + return nil, newErrDictSize( + "lzma: header dictionary size %[2]d exceeds configured dictionary capacity %[1]d", + c.DictCap, uint32(dictSize), + ) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + // original code: disabled this because there is no point in increasing + // the dictionary above what is stated in the file. + /* + if int64(c.DictCap) > int64(dictSize) { + dictSize = int64(c.DictCap) + } + */ + size := r.header.Size + if size >= 0 && size < dictSize { + dictSize = size } - dictCap := r.h.dictCap - if c.DictCap > dictCap { - dictCap = c.DictCap + // Protect against modified or malicious headers. + if size > maxStreamSize { + return nil, fmt.Errorf( + "lzma: stream size %d exceeds a pebibyte (1024^5)", + size) } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + + r.header.DictSize = uint32(dictSize) - state := newState(r.h.properties) - dict, err := newDecoderDict(dictCap) + state := newState(r.header.Properties) + dict, err := newDecoderDict(int(dictSize)) if err != nil { return nil, err } - r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.header.Size) if err != nil { return nil, err } return r, nil } +// Header returns the header as read from the LZMA stream. It is intended to +// allow the user to understand what parameters are typically provided in the +// headers of the LZMA files and set the DictCap field in [ReaderConfig] +// accordingly. +func (r *Reader) Header() (h Header, ok bool) { + return r.headerOrig, r.d != nil +} + // EOSMarker indicates that an EOS marker has been encountered. func (r *Reader) EOSMarker() bool { return r.d.eosMarker diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go index e8f89811d..f73bb73f2 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go @@ -96,21 +96,21 @@ func (c *WriterConfig) Verify() error { } // header returns the header structure for this configuration. 
-func (c *WriterConfig) header() header {
- h := header{
- properties: *c.Properties,
- dictCap: c.DictCap,
- size: -1,
+func (c *WriterConfig) header() Header {
+ h := Header{
+ Properties: *c.Properties,
+ DictSize: uint32(c.DictCap),
+ Size: -1,
}
if c.SizeInHeader {
- h.size = c.Size
+ h.Size = c.Size
}
return h
}
// Writer writes an LZMA stream in the classic format.
type Writer struct {
- h header
+ h Header
bw io.ByteWriter
buf *bufio.Writer
e *encoder
@@ -130,12 +130,12 @@ func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
w.buf = bufio.NewWriter(lzma)
w.bw = w.buf
}
- state := newState(w.h.properties)
- m, err := c.Matcher.new(w.h.dictCap)
+ state := newState(w.h.Properties)
+ m, err := c.Matcher.new(int(w.h.DictSize))
if err != nil {
return nil, err
}
- dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
+ dict, err := newEncoderDict(int(w.h.DictSize), c.BufSize, m)
if err != nil {
return nil, err
}
@@ -171,8 +171,8 @@ func (w *Writer) writeHeader() error {
// Write puts data into the Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
- if w.h.size >= 0 {
- m := w.h.size
+ if w.h.Size >= 0 {
+ m := w.h.Size
m -= w.e.Compressed() + int64(w.e.dict.Buffered())
if m < 0 {
m = 0
@@ -192,9 +192,9 @@ func (w *Writer) Write(p []byte) (n int, err error) {
// Close closes the writer stream. It ensures that all data from the
// buffer will be compressed and the LZMA stream will be finished.
func (w *Writer) Close() error {
- if w.h.size >= 0 {
+ if w.h.Size >= 0 {
n := w.e.Compressed() + int64(w.e.dict.Buffered())
- if n != w.h.size {
+ if n != w.h.Size {
return errSize
}
}
diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version
index b6773170a..7bdcec52d 100644
--- a/vendor/go.etcd.io/bbolt/.go-version
+++ b/vendor/go.etcd.io/bbolt/.go-version
@@ -1 +1 @@
-1.23.10
+1.23.12
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 7123ded8f..1669fb16a 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -387,16 +387,43 @@ func (tx *Tx) Copy(w io.Writer) error {
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader with WriteFlag
- f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
- if err != nil {
- return 0, err
- }
- defer func() {
- if cerr := f.Close(); err == nil {
- err = cerr
+ var f *os.File
+ // There is a risk that between the time a read-only transaction
+ // is created and the time the file is actually opened, the
+ // underlying db file at tx.db.path may have been replaced
+ // (e.g. via rename). In that case, opening the file again would
+ // unexpectedly point to a different file, rather than the one
+ // the transaction was based on.
+ //
+ // To overcome this, we reuse the already opened file handle when
+ // the WriteFlag is not set. When the WriteFlag is set, we reopen the
+ // file but verify that it still refers to the same underlying file
+ // (by device and inode). If it does not, we fall back to
+ // reusing the existing already opened file handle.
+ if tx.WriteFlag != 0 { + // Attempt to open reader with WriteFlag + f, err = tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err } - }() + + if ok, err := sameFile(tx.db.file, f); !ok { + lg := tx.db.Logger() + if cerr := f.Close(); cerr != nil { + lg.Errorf("failed to close the file (%s): %v", tx.db.path, cerr) + } + lg.Warningf("The underlying file has changed, so reuse the already opened file (%s): %v", tx.db.path, err) + f = tx.db.file + } else { + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + } + } else { + f = tx.db.file + } // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -423,13 +450,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, fmt.Errorf("meta 1 copy: %s", err) } - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { - return n, fmt.Errorf("seek: %s", err) - } + // Copy data pages using a SectionReader to avoid affecting f's offset. + dataOffset := int64(tx.db.pageSize * 2) + dataSize := tx.Size() - dataOffset + sr := io.NewSectionReader(f, dataOffset, dataSize) // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + wn, err := io.CopyN(w, sr, dataSize) n += wn if err != nil { return n, err @@ -438,6 +465,19 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, nil } +func sameFile(f1, f2 *os.File) (bool, error) { + fi1, err := f1.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the first file (%s): %w", f1.Name(), err) + } + fi2, err := f2.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the second file (%s): %w", f2.Name(), err) + } + + return os.SameFile(fi1, fi2), nil +} + // CopyFile copies the entire database to file at the given path. // A reader transaction is maintained during the copy so it is safe to continue // using the database while a copy is in progress. diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml new file mode 100644 index 000000000..128d61a22 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml @@ -0,0 +1,3 @@ +exemptions: + - check: artifacthub_badge + reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 888e5da80..5f69cc027 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -66,8 +66,6 @@ linters: desc: Do not use cross-module internal packages. - pkg: go.opentelemetry.io/otel/internal/internaltest desc: Do not use cross-module internal packages. - - pkg: go.opentelemetry.io/otel/internal/matchers - desc: Do not use cross-module internal packages. otlp-internal: files: - '!**/exporters/otlp/internal/**/*.go' @@ -190,6 +188,10 @@ linters: - legacy - std-error-handling rules: + - linters: + - revive + path: schema/v.*/types/.* + text: avoid meaningless package names # TODO: Having appropriate comments for exported objects helps development, # even for objects in internal packages. Appropriate comments for all # exported objects should be added and this exclusion removed. 
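The bbolt `Tx.WriteTo` change above hinges on `os.SameFile`, which compares file identity (device and inode on Unix, file ID on Windows) rather than paths. The following minimal, standalone sketch demonstrates that check after a rename-based swap; the file names are hypothetical and this is not bbolt's API, just the comparison idea its new `sameFile` helper uses.

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Create a file and hold a handle to it, like the handle a bbolt
	// transaction keeps open for its lifetime.
	if err := os.WriteFile("data.db", []byte("v1"), 0o644); err != nil {
		log.Fatal(err)
	}
	held, err := os.Open("data.db")
	if err != nil {
		log.Fatal(err)
	}
	defer held.Close()

	// Replace the path with a different file, as a rename-based swap would.
	if err := os.WriteFile("data.db.new", []byte("v2"), 0o644); err != nil {
		log.Fatal(err)
	}
	if err := os.Rename("data.db.new", "data.db"); err != nil {
		log.Fatal(err)
	}

	// Reopening the same path now yields a different underlying file.
	reopened, err := os.Open("data.db")
	if err != nil {
		log.Fatal(err)
	}
	defer reopened.Close()

	fi1, err := held.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fi2, err := reopened.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Prints false: the path no longer refers to the held file, so code
	// following the pattern above keeps using the original handle.
	fmt.Println("same underlying file:", os.SameFile(fi1, fi2))
}
```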
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 648e4abab..4acc75701 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.37.0/0.59.0/0.13.0] 2025-06-25
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
+ The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
+- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
+ The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
+- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
+- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839)
+
+### Changed
+
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
+- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
+- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
+
+### Fixed
+
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
+- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
+- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
+
+### Removed
+
+- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
+
+## [0.12.2] 2025-05-22
+
+### Fixed
+
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
+
+## [0.12.1] 2025-05-21
+
+### Fixed
+
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
+
## [1.36.0/0.58.0/0.12.0] 2025-05-20
### Added
@@ -3288,7 +3343,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
+[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
+[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
+[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 1902dac05..f9ddc281f 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -109,10 +109,9 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
- * The qualified approvals need to be from [Approver]s/[Maintainer]s
- affiliated with different companies. Two qualified approvals from
- [Approver]s or [Maintainer]s affiliated with the same company counts as a
- single qualified approval.
+ * At least one of the qualified approvals needs to be from an
+ [Approver]/[Maintainer] affiliated with a different company than the author
+ of the PR.
* PRs introducing changes that have already been discussed and consensus
reached only need one qualified approval. The discussion and resolution
needs to be linked to the PR.
@@ -650,11 +649,11 @@ should be canceled.
### Maintainers
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [David Ashpole](https://github.com/dashpole), Google
-- [Robert Pająk](https://github.com/pellared), Splunk
-- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [Tyler Yahn](https://github.com/MrAlias), Splunk
+- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
+- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
+- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
+- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
+- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
### Emeritus
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 62a56f4d3..4fa423ca0 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -293,7 +293,7 @@ semconv-generate: $(SEMCONVKIT)
 --param tag=$(TAG) \
 go \
 /home/weaver/target
- $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+ $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
.PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index b60078812..5fa1b75c6 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -7,6 +7,7 @@
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
+[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 7c1a9119d..1ddcdef03 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -112,6 +112,29 @@ It is critical you make sure the version you push upstream is correct.
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
+### Sign the Release Artifact
+
+To ensure we comply with CNCF best practices, we need to sign the release artifact.
+The tarball attached to the GitHub release needs to be signed with your GPG key.
+
+Follow [these steps] to sign the release artifact and upload it to GitHub.
+You can use [this script] to verify the contents of the tarball before signing it.
+
+Be sure to use the correct GPG key when signing the release artifact.
+
+```terminal
+gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz
+```
+
+You can verify the signature with:
+
+```terminal
+gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz
+```
+
+[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
+[this script]: https://github.com/MrAlias/attest-sh
+
## Post-Release
### Contrib Repository
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index 51fb76b30..935bd4876 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
-FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python
-FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver
+FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
+FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index cf3c88e15..cefe4ab91 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -13,7 +13,7 @@ import (
 "go.opentelemetry.io/otel/attribute"
 "go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 5ecd859a5..0d8619715 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
 "os"
 "regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type containerIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 813f05624..16a062ad8 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
 "go.opentelemetry.io/otel"
 "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 2d0f65498..781903923 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
 "errors"
 "strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)
type hostIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 8a48ab4fa..01b4d27a0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 085fe68fd..6712ce80d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6872cbb4e..6966ed861 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -5,6 +5,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "sync" "sync/atomic" "time" @@ -267,7 +268,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) defer cancel() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 8f4fc3850..1785a4bbb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 1af257449..c0217af6b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md new file mode 100644 index 000000000..02b56115e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md @@ -0,0 +1,4 @@ + +# Migration from v1.33.0 to v1.34.0 + +The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. 
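The `batch_span_processor.go` change above swaps `context.WithTimeout` for `context.WithTimeoutCause`, so code observing the expired context can tell why it was cancelled. A minimal sketch using only the Go standard library (not the OTel SDK) shows the difference `context.Cause` makes after the deadline fires:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// The custom cause attached to the deadline, mirroring the error text
	// used in the diff above.
	cause := errors.New("processor export timeout")
	ctx, cancel := context.WithTimeoutCause(
		context.Background(), 10*time.Millisecond, cause)
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // processor export timeout
}
```

With plain `WithTimeout`, `context.Cause` would return only `context.DeadlineExceeded`; attaching a cause lets exporters surface a specific reason in their retry errors.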
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
new file mode 100644
index 000000000..fab06c975
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.34.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
new file mode 100644
index 000000000..5b5666257
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
@@ -0,0 +1,13851 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It uniquely identifies the
+ // framework API revision offered by a version (`os.version`) of the Android
+ // operating system. More information can be found [here].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies the
+// framework API revision offered by a version (`os.version`) of the Android
+// operating system. More information can be found [here].
+//
+// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found [here] + // . + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. 
+ AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. +func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. 
+// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. 
It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of + // the `ScanIndexForward` request parameter. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:eks:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` endpoint, when applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a Lambda
+ // function. Its contents are read by Lambda and used to trigger a function.
+ // This isn't available in the Lambda execution context or the Lambda runtime
+ // environment. It is populated by the AWS SDK for each language
+ // when that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the Secret stored in the Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
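+// Example (illustrative sketch, not part of the generated semconv API): the
+// DynamoDB keys above, via the constructor functions defined later in this
+// file, can be bundled into an attribute set describing a Query call. The
+// table and index names below are placeholder values drawn from the attribute
+// examples.
+func dynamoDBQueryAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  AWSDynamoDBTableNames("Users"),
+  AWSDynamoDBIndexName("name_to_group"),
+  AWSDynamoDBLimit(10),
+  AWSDynamoDBConsistentRead(true),
+  AWSDynamoDBScanForward(true),
+ }
+}
+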
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
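+// Example (illustrative sketch, not part of the generated semconv API): the
+// ECS constructors can be grouped into a resource-style attribute set for
+// telemetry emitted from an ECS task. The ARNs, IDs, and names below are
+// placeholder values drawn from the attribute examples.
+func ecsTaskAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+  AWSECSTaskARN("arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"),
+  AWSECSTaskID("10838bed-421f-43ef-870a-f43feacbbb5b"),
+  AWSECSTaskFamily("opentelemetry-family"),
+  AWSECSTaskRevision("8"),
+  AWSECSLaunchtypeFargate,
+ }
+}
+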
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// endpoint, when applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a Lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the Lambda execution context or the Lambda runtime
+// environment. It is populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in the Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
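+// Example (illustrative sketch, not part of the generated semconv API):
+// attributes for one part of an S3 multipart upload, combining the bucket,
+// key, upload-ID, and part-number constructors above. The values are
+// placeholders drawn from the attribute examples.
+func s3UploadPartAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  AWSS3Bucket("some-bucket-name"),
+  AWSS3Key("someFile.yml"),
+  AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+  AWSS3PartNumber(3456),
+ }
+}
+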
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // ec2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Namespace: az
+const (
+ // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
+ // conventions. It represents the [Azure Resource Provider Namespace] as
+ // recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzNamespaceKey = attribute.Key("az.namespace")
+
+ // AzServiceRequestIDKey is the attribute Key conforming to the
+ // "az.service_request_id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzServiceRequestIDKey = attribute.Key("az.service_request_id")
+)
+
+// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
+// semantic conventions. It represents the [Azure Resource Provider Namespace] as
+// recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzNamespace(val string) attribute.KeyValue {
+ return AzNamespaceKey.String(val)
+}
+
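+// Example (illustrative sketch, not part of the generated semconv API): Azure
+// SDK instrumentation can pair the provider namespace with the per-request ID
+// returned by the service. The values below are placeholders drawn from the
+// attribute examples.
+func azRequestAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  AzNamespace("Microsoft.Storage"),
+  AzServiceRequestID("00000000-0000-0000-0000-000000000000"),
+ }
+}
+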
+// AzServiceRequestID returns an attribute KeyValue conforming to the
+// "az.service_request_id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzServiceRequestID(val string) attribute.KeyValue {
+ return AzServiceRequestIDKey.String(val)
+}
+
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions i.e. cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. +func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. + // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. 
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
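+// Example (illustrative sketch, not part of the generated semconv API):
+// describing the browser environment of a client-side telemetry source with
+// the constructors in this section. The values are placeholders drawn from the
+// attribute examples.
+func browserClientAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  BrowserBrands("Chromium 99", "Chrome 99"),
+  BrowserLanguage("en-US"),
+  BrowserMobile(false),
+  BrowserPlatform("macOS"),
+ }
+}
+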
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+ // "cassandra.query.idempotent" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
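+// Example (illustrative sketch, not part of the generated semconv API):
+// annotating a CQL query span with the consistency-level enum defined below
+// and the constructors in this section. The values are placeholders drawn from
+// the attribute examples.
+func cassandraQueryAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CassandraConsistencyLevelQuorum,
+  CassandraCoordinatorDC("us-west-2"),
+  CassandraPageSize(5000),
+  CassandraQueryIdempotent(true),
+ }
+}
+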
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // all
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // local_one
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+
+// Namespace: cicd
+const (
+	// CICDPipelineActionNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+	// action a pipeline run is performing.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "BUILD", "RUN", "SYNC"
+	CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+	// CICDPipelineNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.name" semantic conventions. It represents the human readable
+	// name of the pipeline within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Build and Test", "Lint", "Deploy Go Project",
+	// "deploy_to_environment"
+	CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+	// CICDPipelineResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.result" semantic conventions. It represents the result of a
+	// pipeline run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+	// CICDPipelineRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+	// identifier of a pipeline run within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "120912"
+	CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+	// CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a run goes through these states during its lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pending", "executing", "finalizing"
+	CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+	// CICDPipelineRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+	// the pipeline run, providing the complete address in order to locate and
+	// identify the pipeline run.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+	// CICDPipelineTaskNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.name" semantic conventions. It represents the human
+	// readable name of a task within a pipeline. Task here most closely aligns with
+	// a [computing process] in a pipeline. Other terms for tasks include commands,
+	// steps, and procedures.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+	//
+	// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+	CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+	// CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+	// identifier of a task run within a pipeline.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "12097"
+	CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+	// CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.result" semantic conventions. It represents the
+	// result of a task run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+	// CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+	// [URL] of the pipeline task run, providing the complete address in order to
+	// locate and identify the pipeline task run.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+	// CICDPipelineTaskTypeKey is the attribute Key conforming to the
+	// "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+	// task within a pipeline.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "build", "test", "deploy"
+	CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+	// CICDSystemComponentKey is the attribute Key conforming to the
+	// "cicd.system.component" semantic conventions. It represents the name of a
+	// component of the CICD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "controller", "scheduler", "agent"
+	CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+	// CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+	// semantic conventions. It represents the unique identifier of a worker within
+	// a CICD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "abc123", "10.0.1.2", "controller"
+	CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+	// CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+	// semantic conventions. It represents the name of a worker within a CICD
+	// system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "agent-abc", "controller", "Ubuntu LTS"
+	CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+	// CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+	// semantic conventions. It represents the state of a CICD worker / agent.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "idle", "busy", "down"
+	CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+	// CICDWorkerURLFullKey is the attribute Key conforming to the
+	// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+	// worker, providing the complete address in order to locate and identify the
+	// worker.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "https://cicd.example.org/worker/abc123"
+	//
+	// [URL]: https://wikipedia.org/wiki/URL
+	CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+	return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+	return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+	return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+	return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+	return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+	return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+	return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+	return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+	return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+	return CICDWorkerURLFullKey.String(val)
+}
+
+// Enum values for cicd.pipeline.action.name
+var (
+	// The pipeline run is executing a build.
+	// Stability: development
+	CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+	// The pipeline run is executing.
+	// Stability: development
+	CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+	// The pipeline run is executing a sync.
+	// Stability: development
+	CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+	// The pipeline run finished successfully.
+	// Stability: development
+	CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+	// The pipeline run did not finish successfully, e.g. due to a compile error or
+	// a failing test. Such failures are usually detected by non-zero exit codes of
+	// the tools executed in the pipeline run.
+	// Stability: development
+	CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+	// The pipeline run failed due to an error in the CICD system, e.g. due to the
+	// worker being killed.
+	// Stability: development
+	CICDPipelineResultError = CICDPipelineResultKey.String("error")
+	// A timeout caused the pipeline run to be interrupted.
+	// Stability: development
+	CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+	// The pipeline run was cancelled, e.g. by a user manually cancelling the
+	// pipeline run.
+	// Stability: development
+	CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+	// The pipeline run was skipped, e.g. due to a precondition not being met.
+	// Stability: development
+	CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+	// The run pending state spans from the event triggering the pipeline run until
+	// the execution of the run starts (e.g. time spent in a queue, provisioning
+	// agents, creating run resources).
+	//
+	// Stability: development
+	CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+	// The executing state spans the execution of any run tasks (e.g. build, test).
+	// Stability: development
+	CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+	// The finalizing state spans from when the run has finished executing (e.g.
+	// cleanup of run resources).
+	// Stability: development
+	CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+	// The task run finished successfully.
+	// Stability: development
+	CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+	// The task run did not finish successfully, e.g. due to a compile error or a
+	// failing test. Such failures are usually detected by non-zero exit codes of
+	// the tools executed in the task run.
+	// Stability: development
+	CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+	// The task run failed due to an error in the CICD system, e.g. due to the
+	// worker being killed.
+	// Stability: development
+	CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+	// A timeout caused the task run to be interrupted.
+	// Stability: development
+	CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+	// The task run was cancelled, e.g. by a user manually cancelling the task run.
+	// Stability: development
+	CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+	// The task run was skipped, e.g. due to a precondition not being met.
+	// Stability: development
+	CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+	// build
+	// Stability: development
+	CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+	// test
+	// Stability: development
+	CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+	// deploy
+	// Stability: development
+	CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
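+
+// Usage sketch (editorial illustration, not part of the generated API):
+// assuming this package is imported as semconv and a trace.Span named span is
+// in scope, a span describing a pipeline run might combine the constructors
+// and enum values above:
+//
+//	span.SetAttributes(
+//		semconv.CICDPipelineName("Build and Test"),
+//		semconv.CICDPipelineRunID("120912"),
+//		semconv.CICDPipelineResultSuccess,
+//	)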
+
+// Enum values for cicd.worker.state
+var (
+	// The worker is not performing work for the CICD system. It is available to the
+	// CICD system to perform work on (online / idle).
+	// Stability: development
+	CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+	// The worker is performing work for the CICD system.
+	// Stability: development
+	CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+	// The worker is not available to the CICD system (disconnected / down).
+	// Stability: development
+	CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
+// Namespace: client
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix domain
+	// socket name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.address` SHOULD represent the client address behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port" semantic
+	// conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through an
+	// intermediary, `client.port` SHOULD represent the client port behind any
+	// intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
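+
+// Usage sketch (editorial illustration, not part of the generated API): on a
+// server-side span the client peer is typically recorded from the accepted
+// connection. Assuming this package is imported as semconv, conn is a net.Conn,
+// and span is a trace.Span:
+//
+//	host, portStr, _ := net.SplitHostPort(conn.RemoteAddr().String())
+//	port, _ := strconv.Atoi(portStr)
+//	span.SetAttributes(
+//		semconv.ClientAddress(host),
+//		semconv.ClientPort(port),
+//	)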
+
+// Namespace: cloud
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+	// semantic conventions. It represents the cloud account ID the resource is
+	// assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "111111111111", "opentelemetry"
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-east-1c"
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+	// conventions. It represents the geographical region within a cloud provider.
+	// When associated with a resource, this attribute specifies the region where
+	// the resource operates. When calling services or APIs deployed on a cloud,
+	// this attribute identifies the region where the called destination is
+	// deployed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "us-central1", "us-east-1"
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions], [AWS regions], [Azure regions],
+	// [Google Cloud regions], or [Tencent Cloud regions].
+	//
+	// [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+	// [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+	// [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+	// [Google Cloud regions]: https://cloud.google.com/about/locations
+	// [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+	// semantic conventions. It represents the cloud provider-specific native
+	// identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+	// [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+	// "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+	// Note: On some cloud providers, it may not be possible to determine the full
+	// ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud provider.
+	// The following well-known definitions MUST be used if you set this attribute
+	// and they apply:
+	//
+	// - **AWS Lambda:** The function [ARN].
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias suffix]
+	// with the resolved function version, as the same runtime instance may be
+	// invocable with
+	// multiple different aliases.
+	// - **GCP:** The [URI of the resource]
+	// - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function app
+	// can host multiple functions that would usually share
+	// a TracerProvider.
+	//
+	// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+	// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+	// [full resource name]: https://google.aip.dev/122#full-resource-names
+	// [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+	// [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+	// [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+	// Alibaba Cloud Elastic Compute Service
+	// Stability: development
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	// Stability: development
+	CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	// Stability: development
+	CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	// Stability: development
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	// Stability: development
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	// Stability: development
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	// Stability: development
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	// Stability: development
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	// Stability: development
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	// Stability: development
+	CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	// Stability: development
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Apps
+	// Stability: development
+	CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+	// Azure Container Instances
+	// Stability: development
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	// Stability: development
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	// Stability: development
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	// Stability: development
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	// Stability: development
+	CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	// Stability: development
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	// Stability: development
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	// Stability: development
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	// Stability: development
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	// Stability: development
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	// Stability: development
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	// Stability: development
+	CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	// Stability: development
+	CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Compute on Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+	// Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	// Stability: development
+	CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	// Stability: development
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	// Stability: development
+	CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+	// Alibaba Cloud
+	// Stability: development
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	// Stability: development
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	// Stability: development
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	// Stability: development
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	// Stability: development
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	// Stability: development
+	CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+	// Oracle Cloud Infrastructure (OCI)
+	// Stability: development
+	CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+	// Tencent Cloud
+	// Stability: development
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
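+
+// Usage sketch (editorial illustration, not part of the generated API): cloud
+// attributes are typically set on the telemetry resource rather than on
+// individual spans. Assuming this package is imported as semconv alongside
+// go.opentelemetry.io/otel/sdk/resource:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.CloudProviderAWS,
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAvailabilityZone("us-east-1c"),
+//	)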
+
+// Namespace: cloudevents
+const (
+	// CloudEventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+	//
+	// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+	CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudEventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the [source],
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+	// "my-service"
+	//
+	// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+	CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents specification] which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	//
+	// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+	CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudEventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+	// of the event in the context of the event producer (identified by source).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: mynewfile.jpg
+	//
+	// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+	CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudEventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type], which contains a value describing the type of event related to
+	// the originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+	//
+	// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+	CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+// which uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+	return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source],
+// which identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+	return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+	return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type], which contains a value describing the type of event related to
+// the originating occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+	return CloudEventsEventTypeKey.String(val)
+}
+
+// Namespace: cloudfoundry
+const (
+	// CloudFoundryAppIDKey is the attribute Key conforming to the
+	// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+	// application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.application_id`. This is the same value as
+	// reported by `cf app <app-name> --guid`.
+	CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+	// CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+	// of the application instance. 0 when just one instance is active.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0", "1"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the application instance index for applications
+	// deployed on the runtime.
+	//
+	// Application instrumentation should use the value from environment
+	// variable `CF_INSTANCE_INDEX`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+	// CloudFoundryAppNameKey is the attribute Key conforming to the
+	// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+	// application.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-app-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.application_name`. This is the same value
+	// as reported by `cf apps`.
+	CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+	// CloudFoundryOrgIDKey is the attribute Key conforming to the
+	// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+	// CloudFoundry org the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.org_id`. This is the same value as
+	// reported by `cf org <org-name> --guid`.
+	CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+	// CloudFoundryOrgNameKey is the attribute Key conforming to the
+	// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+	// CloudFoundry organization the app is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-org-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.org_name`. This is the same value as
+	// reported by `cf orgs`.
+	CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+	// CloudFoundryProcessIDKey is the attribute Key conforming to the
+	// "cloudfoundry.process.id" semantic conventions. It represents the UID
+	// identifying the process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+	// `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+	// For system components, this could be the actual PID.
+	CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+	// CloudFoundryProcessTypeKey is the attribute Key conforming to the
+	// "cloudfoundry.process.type" semantic conventions. It represents the type of
+	// process.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "web"
+	// Note: CloudFoundry applications can consist of multiple jobs. Usually the
+	// main process will be of type `web`. There can be additional background
+	// tasks or side-cars with different process types.
+	CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+	// CloudFoundrySpaceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+	// CloudFoundry space the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.space_id`. This is the same value as
+	// reported by `cf space <space-name> --guid`.
+	CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+	// CloudFoundrySpaceNameKey is the attribute Key conforming to the
+	// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+	// CloudFoundry space the application is running in.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-space-name"
+	// Note: Application instrumentation should use the value from environment
+	// variable `VCAP_APPLICATION.space_name`. This is the same value as
+	// reported by `cf spaces`.
+	CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+	// CloudFoundrySystemIDKey is the attribute Key conforming to the
+	// "cloudfoundry.system.id" semantic conventions. It represents a guid or
+	// another name describing the event source.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "cf/gorouter"
+	// Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the component name, e.g. "gorouter", for
+	// CloudFoundry components.
+	//
+	// When system components are instrumented, values from the
+	// [Bosh spec]
+	// should be used. The `system.id` should be set to
+	// `spec.deployment/spec.name`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	// [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+	CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+	// CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+	// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+	// describing the concrete instance of the event source.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+	// It is used for logs and metrics emitted by CloudFoundry. It is
+	// supposed to contain the vm id for CloudFoundry components.
+	//
+	// When system components are instrumented, values from the
+	// [Bosh spec]
+	// should be used. The `system.instance.id` should be set to `spec.id`.
+	//
+	// [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+	// [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+	CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+	return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+	return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+	return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+	return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+	return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+	return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+	return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+	return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+	return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+	return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+	return CloudFoundrySystemInstanceIDKey.String(val)
+}
+
+// Namespace: code
+const (
+	// CodeColumnNumberKey is the attribute Key conforming to the
+	// "code.column.number" semantic conventions. It represents the column number in
+	// `code.file.path` best representing the operation. It SHOULD point within the
+	// code unit named in `code.function.name`. This attribute MUST NOT be used on
+	// the Profile signal since the data is already captured in 'message Line'. This
+	// constraint is imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	CodeColumnNumberKey = attribute.Key("code.column.number")
+
+	// CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+	// semantic conventions. It represents the source code file name that identifies
+	// the code unit as uniquely as possible (preferably an absolute file path).
+	// This attribute MUST NOT be used on the Profile signal since the data is
+	// already captured in 'message Function'. This constraint is imposed to prevent
+	// redundancy and maintain data integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: /usr/local/MyApplication/content_root/app/index.php
+	CodeFilePathKey = attribute.Key("code.file.path")
+
+	// CodeFunctionNameKey is the attribute Key conforming to the
+	// "code.function.name" semantic conventions. It represents the method or
+	// function fully-qualified name without arguments. The value should fit the
+	// natural representation of the language runtime, which is also likely the same
+	// used within `code.stacktrace` attribute value. This attribute MUST NOT be
+	// used on the Profile signal since the data is already captured in 'message
+	// Function'. This constraint is imposed to prevent redundancy and maintain data
+	// integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "com.example.MyHttpService.serveRequest",
+	// "GuzzleHttp\Client::transfer", "fopen"
+	// Note: Values and format depend on each language runtime, thus it is
+	// impossible to provide an exhaustive list of examples.
+	// The values are usually the same (or prefixes of) the ones found in native
+	// stack trace representation stored in
+	// `code.stacktrace` without information on arguments.
+	//
+	// Examples:
+	//
+	// - Java method: `com.example.MyHttpService.serveRequest`
+	// - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+	// - Java lambda method:
+	// `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+	// - PHP function: `GuzzleHttp\Client::transfer`
+	// - Go function: `github.com/my/repo/pkg.foo.func5`
+	// - Elixir: `OpenTelemetry.Ctx.new`
+	// - Erlang: `opentelemetry_ctx:new`
+	// - Rust: `playground::my_module::my_cool_func`
+	// - C function: `fopen`
+	CodeFunctionNameKey = attribute.Key("code.function.name")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+	// semantic conventions. It represents the line number in `code.file.path` best
+	// representing the operation. It SHOULD point within the code unit named in
+	// `code.function.name`. This attribute MUST NOT be used on the Profile signal
+	// since the data is already captured in 'message Line'. This constraint is
+	// imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	CodeLineNumberKey = attribute.Key("code.line.number")
+
+	// CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+	// semantic conventions. It represents a stacktrace as a string in the natural
+	// representation for the language runtime. The representation is identical to
+	// [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+	// signal since the data is already captured in 'message Location'. This
+	// constraint is imposed to prevent redundancy and maintain data integrity.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+	// com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+	// com.example.GenerateTrace.main(GenerateTrace.java:5)
+	//
+	// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+	CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+	return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+	return CodeFilePathKey.String(val)
+}
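+
+// Usage sketch (editorial illustration, not part of the generated API):
+// assuming this package is imported as semconv and a trace.Span named span is
+// in scope, code-location attributes can annotate a span with the operation's
+// source position (the line number here is an arbitrary illustrative value):
+//
+//	span.SetAttributes(
+//		semconv.CodeFunctionName("github.com/my/repo/pkg.foo.func5"),
+//		semconv.CodeFilePath("/usr/local/MyApplication/content_root/app/index.php"),
+//		semconv.CodeLineNumber(42),
+//	)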
This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. 
+ // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the repo + // digests of the container image as provided by the container runtime. 
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container runtime
+ // managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
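+//
+// Editor's illustrative sketch (not part of the generated file): container
+// attributes such as these are typically attached to a Resource. Assuming the
+// usual SDK import go.opentelemetry.io/otel/sdk/resource and this package
+// imported as semconv:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ContainerName("opentelemetry-autoconf"),
+//		semconv.ContainerID("a3bf90e006b2"),
+//	)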
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
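+ //
+ // Editor's illustrative sketch (not generated output): this attribute is
+ // typically recorded alongside cpu.mode on system metrics. Assuming a
+ // hypothetical Float64Counter named cpuTime and the import
+ // go.opentelemetry.io/otel/metric:
+ //
+ //	cpuTime.Add(ctx, seconds, metric.WithAttributes(
+ //		semconv.CPULogicalNumber(1),
+ //		semconv.CPUModeUser,
+ //	))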
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. +func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. 
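+ //
+ // Editor's illustrative sketch (not generated output): for a query such as
+ // "SELECT * FROM public.users", an instrumentation might record, assuming a
+ // trace.Span named span:
+ //
+ //	span.SetAttributes(semconv.DBCollectionName("public.users"))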
+ DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. 
+ // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. + // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. + DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. 
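+ //
+ // Editor's illustrative sketch (not generated output): prefer the predefined
+ // enum values defined later in this file over raw strings, e.g.:
+ //
+ //	span.SetAttributes(semconv.DBSystemNamePostgreSQL)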
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. 
+func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. + // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // + // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = 
DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // 
[SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
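+//
+// Editor's illustrative sketch (not part of the generated file), assuming a
+// trace.Span named span and this package imported as semconv:
+//
+//	span.SetAttributes(
+//		semconv.DeploymentEnvironmentName("staging"),
+//		semconv.DeploymentName("deploy-frontend"),
+//	)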
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found [here]
+ // .
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution
+ // > should be taken when storing personal data or anything which can identify a
+ // > user. GDPR and data protection laws may apply,
+ // > ensure you do your own due diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play and
+ // > App Store.
+ // > However, it may be appropriate for specific enterprise scenarios, such as
+ // > kiosk devices or enterprise-managed devices, with appropriate compliance
+ // > clearance.
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
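+ //
+ // Editor's illustrative sketch (not generated output): typically recorded on
+ // disk I/O metrics, assuming a hypothetical Int64Counter named diskIO and the
+ // import go.opentelemetry.io/otel/metric:
+ //
+ //	diskIO.Add(ctx, bytes, metric.WithAttributes(semconv.DiskIODirectionRead))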
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions.
It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
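+ //
+ // Editor's illustrative sketch (not generated output): when no
+ // domain-specific error identifier applies, an instrumentation falls back to,
+ // assuming a trace.Span named span:
+ //
+ //	span.SetAttributes(semconv.ErrorTypeOther)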
+ // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? 
*
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+ // database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3 is
+ // the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
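+ //
+ // Editor's illustrative sketch (not generated output): on the client side of
+ // an invocation one might set, assuming a trace.Span named span:
+ //
+ //	span.SetAttributes(
+ //		semconv.FaaSInvokedName("my-function"),
+ //		semconv.FaaSInvokedProviderAWS,
+ //	)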
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. 
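+//
+// Editor's illustrative sketch (not part of the generated file): a timestamp
+// in the expected format can be produced with the standard library's time
+// package (RFC 3339 is a profile of ISO 8601):
+//
+//	semconv.FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339))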
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. 
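+//
+// A sketch of producing a conforming value with the standard library's time
+// package (RFC 3339 output is an ISO 8601-compatible profile):
+//
+//	FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339))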
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It identifies the
+ // feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It identifies the
+// feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
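+//
+// For example, assuming a `span` from the trace API, an evaluation of the
+// flag "logo-color" that resolved the variant "red" via targeting might be
+// recorded as (values are illustrative):
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagResultVariant("red"),
+//		FeatureFlagResultReasonTargetingMatch,
+//	)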
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: development
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: development
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: development
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: development
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: development
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: development
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: development
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: development
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: development
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifier"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions.
It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. 
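+ //
+ // A sketch of populating this attribute with the standard library's
+ // os.Readlink (the path is illustrative):
+ //
+ //	if target, err := os.Readlink("/usr/bin/python3"); err == nil {
+ //		kv := FileSymbolicLinkTargetPath(target)
+ //		_ = kv
+ //	}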
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. +func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. +func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. 
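+//
+// A sketch deriving this and related file attributes from os.Stat; the path
+// and the `attrs` slice are illustrative:
+//
+//	if fi, err := os.Stat("/home/alice/example.png"); err == nil {
+//		attrs := []attribute.KeyValue{
+//			FileName(fi.Name()),
+//			FileSize(int(fi.Size())),
+//			FileModified(fi.ModTime().UTC().Format(time.RFC3339)),
+//		}
+//		_ = attrs
+//	}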
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service, i.e. the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, i.e. the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
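+ //
+ // A sketch of deriving the value from a canonical service domain, assuming
+ // the standard library's strings package (the host is illustrative):
+ //
+ //	host := "spanner.googleapis.com"
+ //	kv := GCPClientService(strings.TrimSuffix(host, ".googleapis.com"))
+ //	_ = kv // gcp.client.service=spanner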
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
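+//
+// For example, assuming a `span` from the trace API (values are
+// illustrative):
+//
+//	span.SetAttributes(
+//		GCPAppHubApplicationContainer("projects/my-container-project"),
+//		GCPAppHubApplicationLocation("us-central1"),
+//	)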
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. 
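+ //
+ // A usage sketch, assuming a `span` from the trace API (values are
+ // illustrative):
+ //
+ //	span.SetAttributes(
+ //		GenAIAgentName("Math Tutor"),
+ //		GenAIAgentID("asst_5j66UpCpwteGg4YSxUnt7lPY"),
+ //	)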
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+ // service tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+ // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+ // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+ // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+ // represents a fingerprint to track any eventual change in the Generative AI
+ // environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If one of the predefined values applies, but the specific system uses a
+ // different name, it's RECOMMENDED to document it in the semantic conventions
+ // for the specific GenAI system and use the system-specific name in the
+ // instrumentation. If a different name is not documented, instrumentation
+ // libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["base64"], ["float", "binary"]
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: gpt-4
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
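+ //
+ // Typically recorded together with other request settings, e.g. (values are
+ // illustrative, not defaults):
+ //
+ //	span.SetAttributes(
+ //		GenAIRequestModel("gpt-4"),
+ //		GenAIRequestTemperature(0.0),
+ //		GenAIRequestPresencePenalty(0.1),
+ //	)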
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the seed of the
+ // request; requests with the same seed value are more likely to return the
+ // same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ["stop"], ["stop", "length"]
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as identified
+ // by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: openai
+ // Note: The `gen_ai.system` describes a family of GenAI models with a specific
+ // model identified by the `gen_ai.request.model` and `gen_ai.response.model`
+ // attributes.
+ //
+ // The actual GenAI product may differ from the one identified by the client.
+ // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI
+ // client libraries. In such cases, the `gen_ai.system` is set to `openai` based
+ // on the instrumentation's best knowledge, instead of the actual system. The
+ // `server.address` attribute may help identify the actual system in use for
+ // `openai`.
+ //
+ // For custom models, a custom friendly name SHOULD be used.
+ // If none of these options apply, the `gen_ai.system` SHOULD be set to
+ // `_OTHER`.
+ GenAISystemKey = attribute.Key("gen_ai.system")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions.
It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. 
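+//
+// A sketch of typical use for an embeddings request (the formats are
+// illustrative; some systems accept only one per request):
+//
+//	kv := GenAIRequestEncodingFormats("float", "base64")
+//	_ = kv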
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. Requests with the same seed value
+// are more likely to return the same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
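+
+// Usage sketch (illustrative; the model name and token counts are assumed
+// values): a chat span would typically combine the operation, request, and
+// usage attributes defined in this namespace, e.g.
+//
+//    span.SetAttributes(
+//        GenAIOperationNameChat,
+//        GenAIRequestModel("gpt-4"),
+//        GenAIRequestTemperature(0.2),
+//        GenAIUsageInputTokens(100),
+//        GenAIUsageOutputTokens(180),
+//    )
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.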
+func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Deprecated: Use 'gcp.vertex_ai' instead. + GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") + // Deprecated: Use 'gcp.gemini' instead. 
+ GenAISystemGemini = GenAISystemKey.String("gemini") + // Anthropic + // Stability: development + GenAISystemAnthropic = GenAISystemKey.String("anthropic") + // Cohere + // Stability: development + GenAISystemCohere = GenAISystemKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") + // Azure OpenAI + // Stability: development + GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") + // IBM Watsonx AI + // Stability: development + GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") + // AWS Bedrock + // Stability: development + GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") + // Perplexity + // Stability: development + GenAISystemPerplexity = GenAISystemKey.String("perplexity") + // xAI + // Stability: development + GenAISystemXai = GenAISystemKey.String("xai") + // DeepSeek + // Stability: development + GenAISystemDeepseek = GenAISystemKey.String("deepseek") + // Groq + // Stability: development + GenAISystemGroq = GenAISystemKey.String("groq") + // Mistral AI + // Stability: development + GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Deprecated: Replaced by `output`. + GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the hostname command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
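+
+// Usage sketch (illustrative; the attribute values below are assumptions): host
+// attributes usually describe the telemetry resource rather than individual
+// spans, e.g. with resource.NewWithAttributes from
+// go.opentelemetry.io/otel/sdk/resource:
+//
+//    res := resource.NewWithAttributes(
+//        SchemaURL,
+//        HostName("opentelemetry-test"),
+//        HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//        HostArchAMD64,
+//    )
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.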
+func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. 
It represents the matched route, that is, the path template in
+// the format used by the respective server framework.
+//
+// Type: string
+// RequirementLevel: Recommended
+// Stability: Stable
+//
+// Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+// Note: MUST NOT be populated when this is not supported by the HTTP server
+// framework as the route attribute should have low-cardinality and the URI path
+// can NOT substitute it.
+// SHOULD include the [application root] if there is one.
+//
+// [application root]: /docs/http/http-spans.md#http-server-definitions
+HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
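+
+// Usage sketch (illustrative; the method, route, and status below are assumed
+// values): HTTP server instrumentation typically records these attributes on
+// the server span once the route has been matched and the response status is
+// known, e.g.
+//
+//    span.SetAttributes(
+//        HTTPRequestMethodGet,
+//        HTTPRoute("/users/:userID?"),
+//        HTTPResponseStatusCode(200),
+//    )
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions.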
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. 
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+ // Ok
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
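+
+// Usage sketch (illustrative; the gauge, context, and component values are
+// assumed): hardware monitors typically report `hw.state` together with
+// `hw.type`, so the pair identifies which kind of component is in the reported
+// state, e.g.
+//
+//    gauge.Record(ctx, 1,
+//        metric.WithAttributes(
+//            HwID("win32battery_battery_testsysa33_1"),
+//            HwTypeBattery,
+//            HwStateDegraded,
+//        ))
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.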
+ // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. 
Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. 
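+ //
+ // Illustrative sketch (an assumption, not generated content): enum-typed
+ // keys like this one are usually set via the predefined variables declared
+ // below, assuming span is a trace.Span:
+ //
+ //    span.SetAttributes(K8SVolumeTypePersistentVolumeClaim)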
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. 
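+//
+// A hypothetical usage sketch, assuming ctx is a context.Context and the
+// OTel SDK resource package is imported:
+//
+//    res, _ := resource.New(ctx, resource.WithAttributes(
+//        K8SDeploymentUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//    ))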
+func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler. +func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. 
+func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. 
It represents the Linux Slab + // memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. 
This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
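+ //
+ // Illustrative sketch (not generated content): a consumer instrumentation
+ // might record this as, e.g.:
+ //
+ //    attrs := []attribute.KeyValue{MessagingClientIDKey.String("client-5")}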
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. 
An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // keys in Kafka, which are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
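+ //
+ // Hypothetical sketch honoring the `null`-key rule above (key is assumed to
+ // hold the record key, or "" when the key is null):
+ //
+ //    if key != "" {
+ //        span.SetAttributes(MessagingKafkaMessageKeyKey.String(key))
+ //    }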
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed body size. If
+ // both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed size. If both
+ // sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
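+ //
+ // Illustrative sketch (not generated content), pairing the system-specific
+ // operation name with the generic `messaging.operation.type` enum declared
+ // below:
+ //
+ //    span.SetAttributes(
+ //        MessagingOperationNameKey.String("send"),
+ //        MessagingOperationTypeSend,
+ //    )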
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for a delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the
+ // message group, which is essential for FIFO messages. Messages that belong to
+ // the same message group are always processed one by one within the same
+ // consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions.
It represents the
+ // key(s) of the message, another way to mark the message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
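+ //
+ // Illustrative sketch (an assumption, not generated content): per the note
+ // above, a Kafka client library talking to Azure Event Hubs would still
+ // report:
+ //
+ //    span.SetAttributes(MessagingSystemKafka)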
+ MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. 
It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka, which are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
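+//
+// A hypothetical usage sketch (correlationID is assumed to be read from the
+// message headers):
+//
+//    span.SetAttributes(MessagingMessageConversationID(correlationID))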
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the RabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the
+// message group, which is essential for FIFO messages. Messages that belong to
+// the same message group are always processed one by one within the same
+// consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark the message besides the message id.
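+//
+// Illustrative only: the variadic form maps to a string[] attribute, e.g.:
+//
+//    attr := MessagingRocketMQMessageKeys("keyA", "keyB")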
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+ // Deprecated: Replaced by `process`.
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+ // Deprecated: Replaced by `send`.
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
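+ //
+ // Illustrative sketch (not generated content):
+ //
+ //    attrs := []attribute.KeyValue{NetworkCarrierICCKey.String("DE")}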
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // the network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of the [rfc9293]
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more details
+ // regarding the connection.type. It may be the type of cell technology
+ // connection, but it could be used for describing details about a wifi
+ // connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. + // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. 
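+//
+// A brief usage sketch (illustrative only; `span` is assumed to be a
+// trace.Span obtained from the go.opentelemetry.io/otel/trace API):
+//
+//	span.SetAttributes(NetworkProtocolVersion("1.1"))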
+func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. + OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, like e.g. reported by `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0`.
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope - (
+ // `InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
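+//
+// A brief usage sketch (illustrative only), labeling the first OTLP gRPC span
+// exporter instance following the `<otel.component.type>/<instance-counter>`
+// pattern described above:
+//
+//	attr := OTelComponentName("otlp_grpc_span_exporter/0")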
+func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = 
OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string representing the full command.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It specifies whether the
+ // context switches for this data point were voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14.0.2
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
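+//
+// A brief usage sketch (illustrative only; the hex string is the example value
+// documented for this attribute):
+//
+//	kv := ProcessExecutableBuildIDHtlhash("600DCAFE4A110000F2BF38C493F5FB92")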
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
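+//
+// A brief usage sketch (illustrative only; `span` is assumed to be a
+// trace.Span, and 1000 is the documented example value):
+//
+//	span.SetAttributes(ProcessRealUserID(1000))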
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. 
It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It describes the interpreter or
+ // compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1`, one for sent messages and one for received messages. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function.
The `code.function.name` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic + // conventions. It represents the full (logical) name of the service being + // called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myservice.EchoService + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic + // conventions. It represents a string identifying the remoting system. See + // below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCSystemKey = attribute.Key("rpc.system") +) + +// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` +// property of response if it is an error response. +func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1`, one for sent messages and one for received messages.
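+// +// A hedged usage sketch (not part of the generated file): keep one counter per direction and attach the ID to each message event, assuming `span` is a trace.Span and the trace package is imported: +// +//	sent++ +//	span.AddEvent("message", trace.WithAttributes(RPCMessageTypeSent, RPCMessageID(sent)))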
+func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = 
RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + // Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
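+// +// A hedged usage sketch (not generated; `span` is assumed to be a trace.Span, and the values are the documented examples): +// +//	span.SetAttributes(SecurityRuleName("BLOCK_DNS_over_TLS"), SecurityRuleLicense("Apache 2.0"))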
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. 
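+// +// Typically recorded together with `server.address`; a hedged sketch using the documented example values (`span` is assumed to be a trace.Span): +// +//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))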
+func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // that each division (e.g. a worker + // thread in unicorn) have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating a + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions.
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. 
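+// +// A hedged sketch of recording a session rotation (identifiers are illustrative; `span` is assumed to be a trace.Span): +// +//	span.SetAttributes(SessionID("00112233-4455-6677-8899-aabbccddeeff"), SessionPreviousID("00112233-4455-6677-8899-aabbccddee00"))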
+func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the SignalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number.
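+// +// Usually paired with `source.address`; a hedged sketch using the documented example values (`span` is assumed to be a trace.Span): +// +//	span.SetAttributes(SourceAddress("10.1.2.80"), SourcePort(3389))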
+func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It is + // deprecated; use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type.
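+ // + // A hedged usage sketch (not generated): the value is one of the enum constants defined later in this namespace, e.g. + // + //	span.SetAttributes(SystemPagingTypeMinor)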
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It is deprecated; use +// `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // used + // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // Deprecated: Removed, report shared memory usage with + // `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. 
+ // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. 
It represents the fully qualified human readable name + // of the [test case]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a + // [test suite]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID).
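+ // + // A hedged usage sketch (not generated; `span` is assumed to be a trace.Span, and the values are illustrative): + // + //	span.SetAttributes(ThreadID(42), ThreadName("main"))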
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suites]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions.
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/time + // indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello.
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." 
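+ // + // A hedged usage sketch (not generated; values abbreviated, `span` is assumed to be a trace.Span; the Key-based setters are part of the attribute package): + // + //	span.SetAttributes(TLSServerCertificateKey.String("MII..."), TLSServerCertificateChainKey.StringSlice([]string{"MII...", "MI..."}))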
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// server. This is usually mutually-exclusive of `server.certificate` since that
+	// value should be the first certificate in the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII...", "MI..."
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the certificate
+	// fingerprint using the MD5 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+	// fingerprint using the SHA1 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+	// semantic conventions. It represents the distinguished name of [subject] of
+	// the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+	//
+	// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+	// semantic conventions. It represents a hash that identifies servers based on
+	// how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "d4e5b18d6b55c71272893221c96ba240"
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/Time
+	// indicating when server certificate is no longer considered valid.
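+	//
+	// For example (a sketch, with `cert` an *x509.Certificate):
+	//
+	//	span.SetAttributes(TLSServerNotAfter(cert.NotAfter.UTC().Format("2006-01-02T15:04:05.000Z")))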
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. +func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. 
It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions.
It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. +func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. 
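+	//
+	// A hedged sketch for a client span, assuming credentials and sensitive
+	// query values have already been scrubbed from the *url.URL `u`:
+	//
+	//	span.SetAttributes(URLFull(u.String()))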
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. 
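+	//
+	// As a sketch, for a parsed *url.URL `u` that carries an explicit port:
+	//
+	//	if p, err := strconv.Atoi(u.Port()); err == nil {
+	//		span.SetAttributes(URLPort(p))
+	//	}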
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI query] component.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "q=OpenTelemetry"
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	//
+	//
+	// Query string values for the following keys SHOULD be redacted by default and
+	// replaced by the value `REDACTED`:
+	//
+	// - [`AWSAccessKeyId`]
+	// - [`Signature`]
+	// - [`sig`]
+	// - [`X-Goog-Signature`]
+	//
+	// This list is subject to change over time.
+	//
+	// When a query string value is redacted, the query string key SHOULD still be
+	// preserved, e.g.
+	// `q=OpenTelemetry&sig=REDACTED`.
+	//
+	// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+	// [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+	// [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+	// [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered url domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "example.com", "foo.co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	// For example, the registered domain for `foo.example.com` is `example.com`.
+	// Trying to approximate this by simply taking the last two labels will not work
+	// well for TLDs such as `co.uk`.
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+	// conventions. It represents the [URI scheme] component identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "https", "ftp", "telnet"
+	//
+	// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host name
+	// under the registered_domain. In a partially qualified domain, or if the
+	// qualification level of the full name cannot be determined, subdomain contains
+	// all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "east", "sub2.sub1"
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+	// domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+	// subdomain field should contain `sub2.sub1`, with no trailing period.
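+	//
+	// As a sketch, the registered domain and subdomain can be derived with
+	// golang.org/x/net/publicsuffix:
+	//
+	//	etld1, _ := publicsuffix.EffectiveTLDPlusOne("sub2.sub1.example.com") // "example.com"
+	//	sub := strings.TrimSuffix("sub2.sub1.example.com", "."+etld1)         // "sub2.sub1"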
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+	// conventions. It represents the low-cardinality template of an
+	// [absolute path reference].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+	//
+	// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective top
+	// level domain (eTLD), also known as the domain suffix, which is the last part
+	// of the domain name. For example, the top level domain for example.com is
+	// `com`.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "com", "co.uk"
+	// Note: This value can be determined precisely with the [public suffix list].
+	//
+	// [public suffix list]: https://publicsuffix.org/
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
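+//
+// A sketch, assuming sensitive values in `u.RawQuery` were already redacted:
+//
+//	span.SetAttributes(URLQuery(u.RawQuery))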
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
+
+// Namespace: user
+const (
+	// UserEmailKey is the attribute Key conforming to the "user.email" semantic
+	// conventions. It represents the user email address.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "a.einstein@example.com"
+	UserEmailKey = attribute.Key("user.email")
+
+	// UserFullNameKey is the attribute Key conforming to the "user.full_name"
+	// semantic conventions. It represents the user's full name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Albert Einstein"
+	UserFullNameKey = attribute.Key("user.full_name")
+
+	// UserHashKey is the attribute Key conforming to the "user.hash" semantic
+	// conventions. It represents the unique user hash to correlate information for
+	// a user in anonymized form.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+	// Note: Useful if `user.id` or `user.name` contain confidential information and
+	// cannot be used.
+	UserHashKey = attribute.Key("user.hash")
+
+	// UserIDKey is the attribute Key conforming to the "user.id" semantic
+	// conventions. It represents the unique identifier of the user.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. +func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. 
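+	//
+	// For an incoming request this is typically the header value taken
+	// verbatim, e.g. (a sketch, with `req` an *http.Request):
+	//
+	//	span.SetAttributes(UserAgentOriginal(req.UserAgent()))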
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Stable
+	//
+	// Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+	// grpc-java-okhttp/1.27.2"
+	//
+	// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+	// UserAgentOSNameKey is the attribute Key conforming to the
+	// "user_agent.os.name" semantic conventions. It represents the human readable
+	// operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It specifies the category
+	// of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+	UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+	// UserAgentVersionKey is the attribute Key conforming to the
+	// "user_agent.version" semantic conventions. It represents the version of the
+	// user-agent extracted from original. Usually refers to the browser's version.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.1.2", "1.0.0"
+	// Note: [Example] of extracting browser's version from original string. In the
+	// case of using a user-agent for non-browser products, such as microservices
+	// with multiple names/versions inside the `user_agent.original`, the most
+	// significant version SHOULD be selected. In such a scenario it should align
+	// with `user_agent.name`
+	//
+	// [Example]: https://www.whatsmyua.info
+	UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions.
It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. 
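+	//
+	// A sketch of attaching this to a measurement, assuming `linesAdded` is
+	// an Int64Counter from go.opentelemetry.io/otel/metric and `n` the
+	// measured line count:
+	//
+	//	linesAdded.Add(ctx, n, metric.WithAttributes(VCSLineChangeTypeAdded))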
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "added", "removed"
+	VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+	// VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+	// semantic conventions. It represents the group owner within the version
+	// control system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-org", "myteam", "business-unit"
+	VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+	// VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+	// semantic conventions. It represents the name of the version control system
+	// provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "github", "gitlab", "gitea", "bitbucket"
+	VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+	// VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+	// semantic conventions. It represents the name of the [reference] such as
+	// **branch** or **tag** in the repository.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-feature-branch", "tag-1-test"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+	// VCSRefBaseRevisionKey is the attribute Key conforming to the
+	// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+	// object in Git, or a revision number in SVN.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits. The
+	// revision can be a full [hash value (see
+	// glossary)]
+	// of the recorded change to a ref within a repository pointing to a
+	// [commit] object. It does
+	// not necessarily have to be a hash; it can simply define a [revision
+	// number],
+	// which is an integer that is monotonically increasing. In cases where
+	// it is identical to the `ref.base.name`, it SHOULD still be included.
+	// It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see
+	// glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision
+	// number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+	// VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	// Note: `base` refers to the starting point of a change. For example, `main`
+	// would be the base reference of type branch if you've created a new
+	// reference of type branch from it and created new commits.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+	// VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+	// semantic conventions. It represents the name of the [reference] such as
+	// **branch** or **tag** in the repository.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "my-feature-branch", "tag-1-test"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+	// VCSRefHeadRevisionKey is the attribute Key conforming to the
+	// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+	// object in Git, or a revision number in SVN.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time. The revision can be a full [hash value (see
+	// glossary)]
+	// of the recorded change to a ref within a repository pointing to a
+	// [commit] object. It does
+	// not necessarily have to be a hash; it can simply define a [revision
+	// number],
+	// which is an integer that is monotonically increasing. In cases where
+	// it is identical to the `ref.head.name`, it SHOULD still be included.
+	// It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see
+	// glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision
+	// number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+	// VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+	// VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+	// conventions. It represents the type of the [reference] in the repository.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. 
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+	return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+	return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+	return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+	return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+	return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+	return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+	// Open means the change is currently active and under review. It hasn't been
+	// merged into the target branch yet, and it's still possible to make changes or
+	// add comments.
+	// Stability: development
+	VCSChangeStateOpen = VCSChangeStateKey.String("open")
+	// WIP (work-in-progress, draft) means the change is still in progress and not
+	// yet ready for a full review. It might still undergo significant changes.
+	// Stability: development
+	VCSChangeStateWip = VCSChangeStateKey.String("wip")
+	// Closed means the merge request has been closed without merging. This can
+	// happen for various reasons, such as the changes being deemed unnecessary, the
+	// issue being resolved in another way, or the author deciding to withdraw the
+	// request.
+	// Stability: development
+	VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+	// Merged indicates that the change has been successfully integrated into the
+	// target codebase.
+ // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. + // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // Deprecated: Replaced by `gitea`. + VCSProviderNameGittea = VCSProviderNameKey.String("gittea") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
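+	//
+	// A sketch of attaching the webengine attributes to a resource, using
+	// go.opentelemetry.io/otel/sdk/resource:
+	//
+	//	res, _ := resource.New(ctx, resource.WithAttributes(WebEngineName("WildFly")))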
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" +// semantic conventions. It represents the name of the web engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the web +// engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go new file mode 100644 index 000000000..2c5c7ebd0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.34.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go new file mode 100644 index 000000000..88a998f1e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go new file mode 100644 index 000000000..3c23d4592 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index d90af8f67..f3aa39813 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
-	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/internal/telemetry"
)
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ac3c0b15d..7afe92b59 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"

// Version is the current release version of OpenTelemetry in use.
func Version() string {
-	return "1.36.0"
+	return "1.37.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 79f82f3d0..9d4742a17 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,13 +3,12 @@
module-sets:
  stable-v1:
-    version: v1.36.0
+    version: v1.37.0
    modules:
      - go.opentelemetry.io/otel
      - go.opentelemetry.io/otel/bridge/opencensus
      - go.opentelemetry.io/otel/bridge/opencensus/test
      - go.opentelemetry.io/otel/bridge/opentracing
-      - go.opentelemetry.io/otel/bridge/opentracing/test
      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
      - go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -23,14 +22,16 @@ module-sets:
      - go.opentelemetry.io/otel/sdk/metric
      - go.opentelemetry.io/otel/trace
  experimental-metrics:
-    version: v0.58.0
+    version: v0.59.0
    modules:
      - go.opentelemetry.io/otel/exporters/prometheus
  experimental-logs:
-    version: v0.12.0
+    version: v0.13.0
    modules:
      - go.opentelemetry.io/otel/log
+      - go.opentelemetry.io/otel/log/logtest
      - go.opentelemetry.io/otel/sdk/log
+      - go.opentelemetry.io/otel/sdk/log/logtest
      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
      - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
      - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
@@ -40,6 +41,4 @@ module-sets:
      - go.opentelemetry.io/otel/schema
excluded-modules:
  - go.opentelemetry.io/otel/internal/tools
-  - go.opentelemetry.io/otel/log/logtest
-  - go.opentelemetry.io/otel/sdk/log/logtest
  - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 5d4096d46..df35bb9a8 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,21 +9,19 @@ for general contribution guidelines.

## Maintainers (in alphabetical order)

-- [aranjans](https://github.com/aranjans), Google LLC
- [arjan-bal](https://github.com/arjan-bal), Google LLC
- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
- [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. - [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c9b343c71..b1264017d 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -360,6 +360,10 @@ type Balancer interface { // call SubConn.Shutdown for its existing SubConns; however, this will be // required in a future release, so it is recommended. Close() + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() } // ExitIdler is an optional interface for balancers to implement. If @@ -367,8 +371,8 @@ type Balancer interface { // the ClientConn is idle. If unimplemented, ClientConn.Connect will cause // all SubConns to connect. // -// Notice: it will be required for all balancers to implement this in a future -// release. +// Deprecated: All balancers must implement this interface. This interface will +// be removed in a future release. type ExitIdler interface { // ExitIdle instructs the LB policy to reconnect to backends / exit the // IDLE state, if appropriate and possible. Note that SubConns that enter diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index cc606f4da..360db08eb 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -45,7 +47,15 @@ type ChildState struct { // Balancer exposes only the ExitIdler interface of the child LB policy. // Other methods of the child policy are called only by endpointsharding. - Balancer balancer.ExitIdler + Balancer ExitIdler +} + +// ExitIdler provides access to only the ExitIdle method of the child balancer. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. 
+ ExitIdle() } // Options are the options to configure the behaviour of the @@ -104,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -125,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. @@ -205,6 +230,16 @@ func (es *endpointSharding) Close() { } } +func (es *endpointSharding) ExitIdle() { + es.childMu.Lock() + defer es.childMu.Unlock() + for _, bw := range es.children.Load().Values() { + if !bw.isClosed { + bw.child.ExitIdle() + } + } +} + // updateState updates this component's state. It sends the aggregated state, // and a picker with round robin behavior with all the child states present if // needed. @@ -261,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, @@ -326,15 +361,13 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { // ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to // avoid deadlocks due to synchronous balancer state updates. func (bw *balancerWrapper) ExitIdle() { - if ei, ok := bw.child.(balancer.ExitIdler); ok { - go func() { - bw.es.childMu.Lock() - if !bw.isClosed { - ei.ExitIdle() - } - bw.es.childMu.Unlock() - }() - } + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + bw.child.ExitIdle() + } + bw.es.childMu.Unlock() + }() } // updateClientConnStateLocked delivers the ClientConnState to the child diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 494314f23..67f315a0d 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -54,18 +54,9 @@ func init() { balancer.Register(pickfirstBuilder{}) } -type ( - // enableHealthListenerKeyType is a unique key type used in resolver - // attributes to indicate whether the health listener usage is enabled. - enableHealthListenerKeyType struct{} - // managedByPickfirstKeyType is an attribute key type to inform Outlier - // Detection that the generic health listener is being used. - // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when - // implementing the dualstack design. This is a hack. 
Once Dualstack is - // completed, outlier detection will stop sending ejection updates through - // the connectivity listener. - managedByPickfirstKeyType struct{} -) +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} var ( logger = grpclog.Component("pick-first-leaf-lb") @@ -76,21 +67,21 @@ var ( disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", + Unit: "{disconnection}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_succeeded", Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_failed", Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) @@ -149,17 +140,6 @@ func EnableHealthListener(state resolver.State) resolver.State { return state } -// IsManagedByPickfirst returns whether an address belongs to a SubConn -// managed by the pickfirst LB policy. -// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable -// outlier_detection via the with connectivity listener when using pick_first. -// Once Dualstack changes are complete, all SubConns will be created by -// pick_first and outlier detection will only use the health listener for -// ejection. This hack can then be removed. -func IsManagedByPickfirst(addr resolver.Address) bool { - return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil -} - type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -186,7 +166,6 @@ type scData struct { } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) sd := &scData{ rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 35da5d1ec..22045bf39 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -70,10 +70,3 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), }) } - -func (b *rrBalancer) ExitIdle() { - // Should always be ok, as child is endpoint sharding. 
- if ei, ok := b.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - } -} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index cd3eaf8dd..3f762285d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -208,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -1076,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. @@ -1831,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index a63ab606e..c8e337cdd 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -173,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. 
Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 20f65f7bd..8277be7d6 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -110,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -259,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -271,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. 
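To make the migration the comments above recommend concrete, here is a minimal, hedged sketch: configure the authority on the channel with grpc.WithAuthority instead of passing a serverNameOverride to the credentials constructor. The target and host names are hypothetical, and this assumes the grpc-go version vendored in this diff.

package main

import (
	"crypto/x509"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	cp, err := x509.SystemCertPool()
	if err != nil {
		log.Fatal(err)
	}
	// Empty serverNameOverride: the override now comes from the channel option.
	creds := credentials.NewClientTLSFromCert(cp, "")
	conn, err := grpc.NewClient(
		"dns:///backend.internal.example:443", // hypothetical target
		grpc.WithTransportCredentials(creds),
		// Sets both the :authority header and the TLS server name.
		grpc.WithAuthority("backend.example.com"), // hypothetical authority
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}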
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 050ba0f16..7a5ac2e7c 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -213,6 +213,7 @@ func WithReadBufferSize(s int) DialOption { func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -222,6 +223,26 @@ func WithInitialWindowSize(s int32) DialOption { func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticStreamWindowSize returns a DialOption which sets the initial +// stream window size to the value provided and disables dynamic flow control. +func WithStaticStreamWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + o.copts.StaticWindowSize = true + }) +} + +// WithStaticConnWindowSize returns a DialOption which sets the initial +// connection window size to the value provided and disables dynamic flow +// control. +func WithStaticConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + o.copts.StaticWindowSize = true }) } @@ -587,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. 
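Before the WithAuthority implementation below, a short sketch of the two static window-size dial options introduced above. This is a hedged illustration assuming a reachable plaintext target: both options pin the initial HTTP/2 windows and turn off BDP-based dynamic flow control, which WithInitialWindowSize and WithInitialConnWindowSize now also do as a side effect.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient(
		"dns:///backend.internal.example:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStaticStreamWindowSize(1<<20), // 1 MiB per-stream window
		grpc.WithStaticConnWindowSize(4<<20),   // 4 MiB per-connection window
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}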
func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 93136610e..f2c01f296 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -188,13 +188,13 @@ type HealthServer interface { type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") + return nil, status.Error(codes.Unimplemented, "method Check not implemented") } func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") + return nil, status.Error(codes.Unimplemented, "method List not implemented") } func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") + return status.Error(codes.Unimplemented, "method Watch not implemented") } func (UnimplementedHealthServer) testEmbeddedByValue() {} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index fbc1ca356..ba25b8988 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -223,15 +223,7 @@ func (gsb *Balancer) ExitIdle() { // There is no need to protect this read with a mutex, as the write to the // Balancer field happens in SwitchTo, which completes before this can be // called. - if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { - ei.ExitIdle() - return - } - gsb.mu.Lock() - defer gsb.mu.Unlock() - for sc := range balToUpdate.subconns { - sc.Connect() - } + balToUpdate.ExitIdle() } // updateSubConnState forwards the update to the appropriate child. diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index f5f2bdeb8..7e060f5ed 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,30 +26,32 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). 
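As an aside on the UnimplementedHealthServer change above (status.Errorf swapped for status.Error): when the message contains no formatting verbs, status.Error skips the formatting pass entirely. A small sketch of the distinction, with an illustrative host name:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Constant message: status.Error is sufficient.
	errConst := status.Error(codes.Unimplemented, "method Check not implemented")
	// Dynamic message: status.Errorf is still the right call.
	errDyn := status.Errorf(codes.Unavailable, "connection to %q failed", "backend.example.com")
	fmt.Println(errConst, errDyn)
}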
	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
-	// LeastRequestLB is set if we should support the least_request_experimental
-	// LB policy, which can be enabled by setting the environment variable
-	// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
-	LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", true)
+
	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
	// handshakes that can be performed.
	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+
	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
	// option is present for backward compatibility. This option may be overridden
	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
	// or "false".
	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
-	// XDSFallbackSupport is the env variable that controls whether support for
-	// xDS fallback is turned on. If this is unset or is false, only the first
-	// xDS server in the list of server configs will be used.
-	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
+
	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
	// instead of the existing pickfirst implementation. This can be disabled by
	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 3ac798e8e..2699223a2 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -182,35 +182,6 @@ var (
	// other features, including the CSDS service.
	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
-	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
-	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
-	// variable.
-	//
-	// TODO: Remove this function once the RLS env var is removed.
-	RegisterRLSClusterSpecifierPluginForTesting func()
-
-	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
-	// Specifier Plugin for testing purposes. This is needed because there is no way
-	// to unregister the RLS Cluster Specifier Plugin after registering it solely
-	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
-	//
-	// TODO: Remove this function once the RLS env var is removed.
-	UnregisterRLSClusterSpecifierPluginForTesting func()
-
-	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
-	// purposes, regardless of the RBAC environment variable.
-	//
-	// TODO: Remove this function once the RBAC env var is removed.
-	RegisterRBACHTTPFilterForTesting func()
-
-	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
-	// testing purposes. This is needed because there is no way to unregister the
-	// HTTP Filter after registering it solely for testing purposes using
-	// RegisterRBACHTTPFilterForTesting().
-	//
-	// TODO: Remove this function once the RBAC env var is removed.
-	UnregisterRBACHTTPFilterForTesting func()
-
	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95d..ada5251cf 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ef72fbb3a..a2831e5d0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,6 +40,13 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } +// itemNodePool is used to reduce heap allocations. +var itemNodePool = sync.Pool{ + New: func() any { + return &itemNode{} + }, +} + type itemNode struct { it any next *itemNode @@ -51,7 +58,9 @@ type itemList struct { } func (il *itemList) enqueue(i any) { - n := &itemNode{it: i} + n := itemNodePool.Get().(*itemNode) + n.next = nil + n.it = i if il.tail == nil { il.head, il.tail = n, n return @@ -71,7 +80,9 @@ func (il *itemList) dequeue() any { return nil } i := il.head.it + temp := il.head il.head = il.head.next + itemNodePool.Put(temp) if il.head == nil { il.tail = nil } @@ -146,10 +157,11 @@ type earlyAbortStream struct { func (*earlyAbortStream) isTransportResponseFrame() bool { return false } type dataFrame struct { - streamID uint32 - endStream bool - h []byte - reader mem.Reader + streamID uint32 + endStream bool + h []byte + data mem.BufferSlice + processing bool // onEachWrite is called every time // a part of data is written out. 
onEachWrite func() @@ -234,6 +246,7 @@ type outStream struct { itl *itemList bytesOutStanding int wq *writeQuota + reader mem.Reader next *outStream prev *outStream @@ -461,7 +474,9 @@ func (c *controlBuffer) finish() { v.onOrphaned(ErrConnClosing) } case *dataFrame: - _ = v.reader.Close() + if !v.processing { + v.data.Free() + } } } @@ -650,10 +665,11 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } l.estdStreams[h.streamID] = str } @@ -685,10 +701,11 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + reader: mem.BufferSlice{}.Reader(), } return l.originateStream(str, h) } @@ -790,10 +807,13 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) + str.reader.Close() str.deleteSelf() for head := str.itl.dequeueAll(); head != nil; head = head.next { if df, ok := head.it.(*dataFrame); ok { - _ = df.reader.Close() + if !df.processing { + df.data.Free() + } } } } @@ -928,7 +948,13 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } + reader := str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + if !dataItem.processing { + dataItem.processing = true + str.reader.Reset(dataItem.data) + dataItem.data.Free() + } // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. // Every dataFrame has two buffers; h that keeps grpc-message header and data @@ -936,13 +962,13 @@ func (l *loopyWriter) processData() (bool, error) { // from data is copied to h to make as big as the maximum possible HTTP2 frame // size. - if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = dataItem.reader.Close() + _ = reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. 
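The itemNodePool introduced at the top of this controlbuf change is the standard sync.Pool freelist pattern: dequeue returns nodes to the pool, and enqueue must reset a pooled node's fields because Get can hand back an object with stale contents. A self-contained sketch of the same pattern (names are illustrative, not grpc internals):

package main

import (
	"fmt"
	"sync"
)

type node struct {
	it   any
	next *node
}

var nodePool = sync.Pool{New: func() any { return &node{} }}

type list struct{ head, tail *node }

func (l *list) enqueue(i any) {
	n := nodePool.Get().(*node)
	n.it, n.next = i, nil // reset: pooled nodes may carry stale fields
	if l.tail == nil {
		l.head, l.tail = n, n
		return
	}
	l.tail.next = n
	l.tail = n
}

func (l *list) dequeue() any {
	if l.head == nil {
		return nil
	}
	n := l.head
	i := n.it
	l.head = n.next
	if l.head == nil {
		l.tail = nil
	}
	n.it, n.next = nil, nil // drop references before pooling
	nodePool.Put(n)
	return i
}

func main() {
	var l list
	l.enqueue("a")
	l.enqueue("b")
	fmt.Println(l.dequeue(), l.dequeue(), l.dequeue()) // a b <nil>
}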
@@ -971,8 +997,8 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, dataItem.reader.Remaining()) - remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + dSize := min(maxSize-hSize, reader.Remaining()) + remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize var buf *[]byte @@ -993,7 +1019,7 @@ func (l *loopyWriter) processData() (bool, error) { defer pool.Put(buf) copy((*buf)[:hSize], dataItem.h) - _, _ = dataItem.reader.Read((*buf)[hSize:]) + _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1014,7 +1040,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = dataItem.reader.Close() + _ = reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index ef56592b9..5467fe971 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -309,11 +309,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts scheme = "https" } } - dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize - dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize @@ -381,9 +379,8 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false } - if dynamicWindow { + if !opts.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -1091,32 +1088,29 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { - reader := data.Reader() - if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { - _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { - _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - reader: reader, + data: data, } - if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { - _ = reader.Close() + dataLen := data.Len() + if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota. 
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil { return err } } + data.Ref() if err := t.controlBuf.put(df); err != nil { - _ = reader.Close() + data.Free() return err } t.incrMsgSent() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index e4c3731bd..9f725e15a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -132,6 +132,10 @@ type http2Server struct { maxStreamID uint32 // max stream ID ever seen logger *grpclog.PrefixLogger + // setResetPingStrikes is stored as a closure instead of making this a + // method on http2Server to avoid a heap allocation when converting a method + // to a closure for passing to frames objects. + setResetPingStrikes func() } // NewServerTransport creates a http2 transport with conn and configuration @@ -176,16 +180,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, Val: config.MaxStreams, }) } - dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize - dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize - dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ @@ -266,6 +267,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, initialWindowSize: iwz, bufferPool: config.BufferPool, } + t.setResetPingStrikes = func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { czSecurity = au.GetSecurityValue() @@ -285,7 +289,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) - if dynamicWindow { + if !config.StaticWindowSize { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, @@ -596,10 +600,25 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade return nil } } + + if s.ctx.Err() != nil { + t.mu.Unlock() + // Early abort in case the timeout was zero or so low it already fired. + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusOK, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()), + rst: !frame.StreamEnded(), + }) + return nil + } + t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. if timeoutSet { // We need to wait for s.cancel to be updated before calling @@ -1016,10 +1035,6 @@ func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { return nil } -func (t *http2Server) setResetPingStrikes() { - atomic.StoreUint32(&t.resetPingStrikes, 1) -} - func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. @@ -1132,17 +1147,13 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. 
Non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
-	reader := data.Reader()
-
	if !s.isHeaderSent() { // Headers haven't been written yet.
		if err := t.writeHeader(s, nil); err != nil {
-			_ = reader.Close()
			return err
		}
	} else { // Writing headers checks for this condition.
		if s.getState() == streamDone {
-			_ = reader.Close()
			return t.streamContextErr(s)
		}
	}
@@ -1150,15 +1161,16 @@ func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _
	df := &dataFrame{
		streamID:    s.id,
		h:           hdr,
-		reader:      reader,
+		data:        data,
		onEachWrite: t.setResetPingStrikes,
	}
-	if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
-		_ = reader.Close()
+	dataLen := data.Len()
+	if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
		return t.streamContextErr(s)
	}
+	data.Ref()
	if err := t.controlBuf.put(df); err != nil {
-		_ = reader.Close()
+		data.Free()
		return err
	}
	t.incrMsgSent()
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 607d2c4ce..e3663f87f 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -200,9 +200,6 @@ func decodeTimeout(s string) (time.Duration, error) {
	if err != nil {
		return 0, err
	}
-	if t == 0 {
-		return 0, fmt.Errorf("transport: timeout must be positive: %q", s)
-	}
	const maxHours = math.MaxInt64 / uint64(time.Hour)
	if d == time.Hour && t > maxHours {
		// This timeout would overflow math.MaxInt64; clamp it.
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 1730a639f..7dd53e80a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -466,6 +466,7 @@ type ServerConfig struct {
	MaxHeaderListSize *uint32
	HeaderTableSize   *uint32
	BufferPool        mem.BufferPool
+	StaticWindowSize  bool
}

// ConnectOptions covers all relevant options for communicating with the server.
@@ -504,6 +505,8 @@ type ConnectOptions struct {
	MaxHeaderListSize *uint32
	// The mem.BufferPool to use when reading/writing to the wire.
	BufferPool mem.BufferPool
+	// StaticWindowSize, when true, disables dynamic window sizing.
+	StaticWindowSize bool
}

// WriteOptions provides additional hints and information for message
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
index 65002e2cc..af510d20c 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -137,6 +137,9 @@ type Reader interface {
	Close() error
	// Remaining returns the number of unread bytes remaining in the slice.
	Remaining() int
+	// Reset frees the currently held buffer slice and starts reading from the
+	// provided slice. This allows reusing the reader object.
+ Reset(s BufferSlice) } type sliceReader struct { @@ -150,6 +153,14 @@ func (r *sliceReader) Remaining() int { return r.len } +func (r *sliceReader) Reset(s BufferSlice) { + r.data.Free() + s.Ref() + r.data = s + r.len = s.Len() + r.bufferIdx = 0 +} + func (r *sliceReader) Close() error { r.data.Free() r.data = nil diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798d..aa52bfe95 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. - pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. 
if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d4..8e6af9514 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 976e70ae0..1da2a542a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -179,6 +179,7 @@ type serverOptions struct { numServerWorkers uint32 bufferPool mem.BufferPool waitForHandlers bool + staticWindowSize bool } var defaultServerOptions = serverOptions{ @@ -279,6 +280,7 @@ func ReadBufferSize(s int) ServerOption { func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s + o.staticWindowSize = true }) } @@ -287,6 +289,29 @@ func InitialWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticStreamWindowSize returns a ServerOption to set the initial stream +// window size to the value provided and disables dynamic flow control. +// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticStreamWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + o.staticWindowSize = true + }) +} + +// StaticConnWindowSize returns a ServerOption to set the initial connection +// window size to the value provided and disables dynamic flow control. 
+// The lower bound for window size is 64K and any value smaller than that +// will be ignored. +func StaticConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + o.staticWindowSize = true }) } @@ -986,6 +1011,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, BufferPool: s.opts.bufferPool, + StaticWindowSize: s.opts.staticWindowSize, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1572,6 +1598,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index baf7740ef..10bf998aa 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. -func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. 
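Before the InPayload definition that follows, a hedged sketch of consuming the renamed event: a client-side stats.Handler that counts RPCs delayed waiting for a connection, registered via grpc.WithStatsHandler. The handler type and counter are illustrative, and the snippet assumes the grpc-go version in this diff, where stats.DelayedPickComplete exists.

package main

import (
	"context"
	"sync/atomic"

	"google.golang.org/grpc/stats"
)

// delayedPicks counts RPCs that blocked waiting for a connection.
type delayedPicks struct{ n atomic.Int64 }

var _ stats.Handler = (*delayedPicks)(nil)

func (h *delayedPicks) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (h *delayedPicks) HandleRPC(_ context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.DelayedPickComplete); ok {
		h.n.Add(1)
	}
}

func (h *delayedPicks) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (h *delayedPicks) HandleConn(context.Context, stats.ConnStats) {}

func main() {}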
type InPayload struct { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d58bb6471..d9bbd4c57 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -469,8 +469,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -481,6 +482,11 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } + } return nil } @@ -1171,7 +1177,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message") } func (a *csAttempt) finish(err error) { @@ -1495,7 +1501,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } else if err != nil { return toRPCErr(err) } - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) + return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message") } func (as *addrConnStream) finish(err error) { @@ -1580,6 +1586,7 @@ type serverStream struct { s *transport.ServerStream p *parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1588,6 +1595,8 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1774,6 +1783,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1781,6 +1794,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1800,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload.
+ if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bd82673dc..bc1eb290f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.73.0" +const Version = "1.75.0" diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index e942bc983..743bfb81d 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) { func SizeVarint(v uint64) int { // This computes 1 + (bits.Len64(v)-1)/7. // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 + // + // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT + // instruction, which is very fast on CPUs from the last few years. The + // specific way of expressing the calculation matches C++ Protobuf, see + // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang + // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell). + + // By OR'ing v with 1, we guarantee that v is never 0, without changing the + // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler + // needs to add extra instructions to handle that case. + // + // The Go compiler currently (go1.24.4) does not make use of this knowledge. + // This opportunity (removing the XOR instruction, which handles the 0 case) + // results in a small (1%) performance win across CPU architectures. + // + // Independently of avoiding the 0 case, we need the v |= 1 line because + // it allows the Go compiler to eliminate an extra XCHGL barrier. + v |= 1 + + // It would be clearer to write log2value := 63 - uint32(...), but + // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel). + // Proof of identity for our value range [0..63]: + // https://go.dev/play/p/Pdn9hEWYakX + log2value := uint32(bits.LeadingZeros64(v)) ^ 63 + return int((log2value*9 + (64 + 9)) / 64) } // AppendFixed32 appends v to b as a little-endian uint32. 
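To make the arithmetic in the rewritten SizeVarint concrete: a varint stores 7 payload bits per byte, so the encoded size of v is 1 + (bits.Len64(v)-1)/7, and the patch computes that via a leading-zeros count instead. The sketch below is illustrative only and not part of this patch; the helper names sizeVarintFast and sizeVarintNaive are invented for the example, and it uses only the Go standard library to check the closed-form formula against a naive per-byte loop.

package main

import (
	"fmt"
	"math/bits"
)

// sizeVarintFast mirrors the patched protowire.SizeVarint.
func sizeVarintFast(v uint64) int {
	// OR'ing with 1 leaves the encoded size unchanged and avoids the
	// LeadingZeros64(0) == 64 corner case.
	v |= 1
	// Equals 63 - LeadingZeros64(v), since LeadingZeros64(v) is in [0, 63] here.
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	// Computes 1 + log2value/7 exactly for log2value in [0, 63],
	// using 9/64 as the approximation of 1/7.
	return int((log2value*9 + (64 + 9)) / 64)
}

// sizeVarintNaive counts encoded bytes directly: 7 payload bits per byte.
func sizeVarintNaive(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 62, ^uint64(0)} {
		f, n := sizeVarintFast(v), sizeVarintNaive(v)
		fmt.Printf("v=%d fast=%d naive=%d match=%t\n", v, f, n, f == n)
	}
}

Running this prints matching sizes at the byte boundaries (1 byte up to 127, 2 bytes up to 16383, and 10 bytes for the maximum uint64), which is what makes the branch-free form a safe drop-in for the loop.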
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 323829da1..04696351e 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index b08b71830..a0aad2777 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -72,6 +72,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { case genid.FeatureSet_EnforceNamingStyle_field_number: // EnforceNamingStyle is enforced in protoc, languages other than C++ // are not supposed to do anything with this feature. + case genid.FeatureSet_DefaultSymbolVisibility_field_number: + // DefaultSymbolVisibility is enforced in protoc, runtimes should not + // inspect this value. default: panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go new file mode 100644 index 000000000..a12ec9791 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go @@ -0,0 +1,33 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import "google.golang.org/protobuf/reflect/protoreflect" + +// UsePresenceForField reports whether the presence bitmap should be used for +// the specified field. +func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneof fields never use the presence bitmap. + // + // Synthetic oneofs are an exception: Those are used to implement proto3 + // optional fields and hence should follow non-oneof field semantics. + return false, false + + case fd.IsMap(): + // Map-typed fields never use the presence bitmap. + return false, false + + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + // Lazy fields always use the presence bitmap (only messages can be lazy). + isLazy := fd.(interface{ IsLazy() bool }).IsLazy() + return isLazy, isLazy + + default: + // If the field has presence, use the presence bitmap.
+ return fd.HasPresence(), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go index df8f91850..3ceb6fa7f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -27,6 +27,7 @@ const ( Api_SourceContext_field_name protoreflect.Name = "source_context" Api_Mixins_field_name protoreflect.Name = "mixins" Api_Syntax_field_name protoreflect.Name = "syntax" + Api_Edition_field_name protoreflect.Name = "edition" Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" @@ -35,6 +36,7 @@ const ( Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" + Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition" ) // Field numbers for google.protobuf.Api. @@ -46,6 +48,7 @@ const ( Api_SourceContext_field_number protoreflect.FieldNumber = 5 Api_Mixins_field_number protoreflect.FieldNumber = 6 Api_Syntax_field_number protoreflect.FieldNumber = 7 + Api_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Method. @@ -63,6 +66,7 @@ const ( Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" Method_Options_field_name protoreflect.Name = "options" Method_Syntax_field_name protoreflect.Name = "syntax" + Method_Edition_field_name protoreflect.Name = "edition" Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" @@ -71,6 +75,7 @@ const ( Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" + Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition" ) // Field numbers for google.protobuf.Method. @@ -82,6 +87,7 @@ const ( Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 Method_Options_field_number protoreflect.FieldNumber = 6 Method_Syntax_field_number protoreflect.FieldNumber = 7 + Method_Edition_field_number protoreflect.FieldNumber = 8 ) // Names for google.protobuf.Mixin. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 39524782a..950a6a325 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -34,6 +34,19 @@ const ( Edition_EDITION_MAX_enum_value = 2147483647 ) +// Full and short names for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility" + SymbolVisibility_enum_name = "SymbolVisibility" +) + +// Enum values for google.protobuf.SymbolVisibility. +const ( + SymbolVisibility_VISIBILITY_UNSET_enum_value = 0 + SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1 + SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2 +) + // Names for google.protobuf.FileDescriptorSet. 
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -65,6 +78,7 @@ const ( FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency" FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" FileDescriptorProto_Service_field_name protoreflect.Name = "service" @@ -79,6 +93,7 @@ const ( FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency" FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" @@ -96,6 +111,7 @@ const ( FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15 FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 @@ -124,6 +140,7 @@ const ( DescriptorProto_Options_field_name protoreflect.Name = "options" DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + DescriptorProto_Visibility_field_name protoreflect.Name = "visibility" DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" @@ -135,6 +152,7 @@ const ( DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" + DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility" ) // Field numbers for google.protobuf.DescriptorProto. @@ -149,6 +167,7 @@ const ( DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 + DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11 ) // Names for google.protobuf.DescriptorProto.ExtensionRange. 
@@ -388,12 +407,14 @@ const ( EnumDescriptorProto_Options_field_name protoreflect.Name = "options" EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility" EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" + EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility" ) // Field numbers for google.protobuf.EnumDescriptorProto. @@ -403,6 +424,7 @@ const ( EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 + EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. @@ -1008,32 +1030,35 @@ const ( // Field names for google.protobuf.FeatureSet. const ( - FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" - FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" - FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" - FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" - FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" - FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" - FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" - - FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" - FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" - FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" - FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" - FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" - FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" - FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility" + + 
FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" + FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility" ) // Field numbers for google.protobuf.FeatureSet. const ( - FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 - FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 - FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 - FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 - FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 - FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 - FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 + FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1128,6 +1153,27 @@ const ( FeatureSet_STYLE_LEGACY_enum_value = 2 ) +// Names for google.protobuf.FeatureSet.VisibilityFeature. +const ( + FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature" + FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature" +) + +// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility" + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility" +) + +// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility. +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0 + FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1 + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2 + FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3 + FeatureSet_VisibilityFeature_STRICT_enum_value = 4 +) + // Names for google.protobuf.FeatureSetDefaults. 
const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 41c1f74ef..bdad12a9b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/reflect/protoreflect" piface "google.golang.org/protobuf/runtime/protoiface" @@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf // permit us to skip over definitely-unset fields at marshal time. var hasPresence bool - hasPresence, cf.isLazy = usePresenceForField(si, fd) + hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd) if hasPresence { cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index dd55e8e00..5a439daac 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -11,6 +11,7 @@ import ( "strings" "sync/atomic" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool { fd := fds.Get(i) fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo - usePresence, _ := usePresenceForField(si, fd) + usePresence, _ := filedesc.UsePresenceForField(fd) switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): @@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return false } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return false } - rv := sp.AsValueOf(fs.Type.Elem()) return rv.Elem().Len() > 0 }, clear: func(p pointer) { - sp := p.Apply(fieldOffset).AtomicGetPointer() - if !sp.IsNil() { - rv := sp.AsValueOf(fs.Type.Elem()) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if !rv.IsNil() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } }, @@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn if p.IsNil() { return conv.Zero() } - sp := p.Apply(fieldOffset).AtomicGetPointer() - if sp.IsNil() { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { return conv.Zero() } - rv := sp.AsValueOf(fs.Type.Elem()) if rv.Elem().Len() == 0 { return conv.Zero() } @@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) { func (mi *MessageInfo) present(p pointer, index uint32) bool { return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) } - -// usePresenceForField implements the somewhat intricate logic of when -// the presence bitmap is used for a field. The main logic is that a -// field that is optional or that can be lazy will use the presence -// bit, but for proto2, also maps have a presence bit. It also records -// if the field can ever be lazy, which is true if we have a -// lazyOffset and the field is a message or a slice of messages. 
A -// field that is lazy will always need a presence bit. Oneofs are not -// lazy and do not use presence, unless they are a synthetic oneof, -// which is a proto3 optional field. For proto3 optionals, we use the -// presence and they can also be lazy when applicable (a message). -func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { - hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() - - // Non-oneof scalar fields with explicit field presence use the presence array. - usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - return false, false - case fd.IsMap(): - return false, false - case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: - return hasLazyField, hasLazyField - default: - return usesPresenceArray || (hasLazyField && fd.HasPresence()), false - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go index 914cb1ded..443afe81c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/presence.go +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) { // Present checks for the presence of a specific field number in a presence set. func (p presence) Present(num uint32) bool { - if p.P == nil { - return false - } return Export{}.Present(p.toElem(num), num) } diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index aac1cb18a..697d1c14f 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 6 + Patch = 8 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index a4a0a2971..730331e66 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "public_dependency", nil) case 11: b = p.appendRepeatedField(b, "weak_dependency", nil) + case 15: + b = p.appendRepeatedField(b, "option_dependency", nil) case 4: b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) case 5: @@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) case 10: b = p.appendRepeatedField(b, "reserved_name", nil) + case 11: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) case 5: b = p.appendRepeatedField(b, "reserved_name", nil) + case 6: + b = p.appendSingularField(b, "visibility", nil) } return b } @@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "json_format", nil) case 7: b = p.appendSingularField(b, "enforce_naming_style", nil) + case 
8: + b = p.appendSingularField(b, "default_symbol_visibility", nil) } return b } diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 7fe280f19..4eacb523c 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} } +// Describes the 'visibility' of a symbol with respect to the proto import +// system. Symbols can only be imported when the visibility rules do not prevent +// it (ex: local symbols cannot be imported). Visibility modifiers can only be +// set on `message` and `enum` as they are the only types available to be +// referenced from other files. +type SymbolVisibility int32 + +const ( + SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0 + SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1 + SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2 +) + +// Enum value maps for SymbolVisibility. +var ( + SymbolVisibility_name = map[int32]string{ + 0: "VISIBILITY_UNSET", + 1: "VISIBILITY_LOCAL", + 2: "VISIBILITY_EXPORT", + } + SymbolVisibility_value = map[string]int32{ + "VISIBILITY_UNSET": 0, + "VISIBILITY_LOCAL": 1, + "VISIBILITY_EXPORT": 2, + } +) + +func (x SymbolVisibility) Enum() *SymbolVisibility { + p := new(SymbolVisibility) + *p = x + return p +} + +func (x SymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (SymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x SymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *SymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = SymbolVisibility(num) + return nil +} + +// Deprecated: Use SymbolVisibility.Descriptor instead. +func (SymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + // The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32 @@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() 
protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[10] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string { } func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() } func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] + return &file_google_protobuf_descriptor_proto_enumTypes[11] } func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { @@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string { } func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() } func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] + return &file_google_protobuf_descriptor_proto_enumTypes[12] } func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { @@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string { } func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() } func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] + return &file_google_protobuf_descriptor_proto_enumTypes[13] } func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { @@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string { } func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() + return 
file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() } func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] + return &file_google_protobuf_descriptor_proto_enumTypes[14] } func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { @@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string { } func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() } func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] + return &file_google_protobuf_descriptor_proto_enumTypes[15] } func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { @@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string { } func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { @@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string { } func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { @@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} } +type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32 + +const ( + FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0 + // Default pre-EDITION_2024, all UNSET visibility are export. + FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1 + // All top-level symbols default to export, nested default to local. + FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2 + // All symbols default to local. + FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3 + // All symbols local by default. Nested types cannot be exported. + // With special case caveat for message { enum {} reserved 1 to max; } + // This is the recommended setting for new protos. + FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4 +) + +// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility. 
+var ( + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{ + 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN", + 1: "EXPORT_ALL", + 2: "EXPORT_TOP_LEVEL", + 3: "LOCAL_ALL", + 4: "STRICT", + } + FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{ + "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0, + "EXPORT_ALL": 1, + "EXPORT_TOP_LEVEL": 2, + "LOCAL_ALL": 3, + "STRICT": 4, + } +) + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility) + *p = x + return p +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor() +} + +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[18] +} + +func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num) + return nil +} + +// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead. +func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[17] + return &file_google_protobuf_descriptor_proto_enumTypes[19] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1321,6 +1456,9 @@ type FileDescriptorProto struct { // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // Names of files imported by this file purely for the purpose of providing + // option extensions. These are excluded from the dependency list above. + OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"` // All top-level definitions in this file. 
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` @@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 { return nil } +func (x *FileDescriptorProto) GetOptionDependency() []string { + if x != nil { + return x.OptionDependency + } + return nil +} + func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { if x != nil { return x.MessageType @@ -1484,7 +1629,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string { return nil } +func (x *DescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + type ExtensionRangeOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. @@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + // Support for `export` and `local` keywords on enums. + Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string { return nil } +func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility { + if x != nil && x.Visibility != nil { + return *x.Visibility + } + return SymbolVisibility_VISIBILITY_UNSET +} + // Describes a value within an enum. type EnumValueDescriptorProto struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2710,7 +2873,10 @@ type FieldOptions struct { // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // DEPRECATED. DO NOT USE! // For Google-internal migration only. Do not use. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. @@ -2814,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool { return Default_FieldOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FieldOptions) GetWeak() bool { if x != nil && x.Weak != nil { return *x.Weak @@ -3392,17 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle 
`protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` + DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3484,6 +3652,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN } +func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility { + if x != nil && x.DefaultSymbolVisibility != nil { + return *x.DefaultSymbolVisibility + } + return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4144,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +type FeatureSet_VisibilityFeature struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FeatureSet_VisibilityFeature) Reset() { + *x = FeatureSet_VisibilityFeature{} + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FeatureSet_VisibilityFeature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet_VisibilityFeature) ProtoMessage() {} + +func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead. +func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + // A map from every known edition with a unique set of defaults to its // defaults. Not all editions may be contained here. 
For a given edition, // the defaults at the closest matching edition ordered at or before it should @@ -4161,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4173,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4309,7 +4520,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4321,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4393,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4405,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + mi := &file_google_protobuf_descriptor_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4462,7 +4673,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\n" + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + "\x11FileDescriptorSet\x128\n" + - "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" + "\x13FileDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + @@ -4471,7 +4682,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "dependency\x12+\n" + "\x11public_dependency\x18\n" + " \x03(\x05R\x10publicDependency\x12'\n" + - "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" + + "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" + "\fmessage_type\x18\x04 \x03(\v2 
.google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + @@ -4479,7 +4691,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + - "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" + "\x0fDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + @@ -4493,7 +4705,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + "\rreserved_name\x18\n" + - " \x03(\tR\freservedName\x1az\n" + + " \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1az\n" + "\x0eExtensionRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + @@ -4562,13 +4777,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + "\x14OneofDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + - "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" + "\x13EnumDescriptorProto\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + - "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" + + "\n" + + "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" + + "visibility\x1a;\n" + "\x11EnumReservedRange\x12\x14\n" + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + @@ -4629,7 +4847,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + - "\"\x9d\r\n" + + "\"\xa1\r\n" + "\fFieldOptions\x12A\n" + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + @@ -4638,9 +4856,9 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + "\n" + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + - "deprecated\x12\x19\n" + + 
"deprecated\x12\x1d\n" + "\x04weak\x18\n" + - " \x01(\b:\x05falseR\x04weak\x12(\n" + + " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + @@ -4728,7 +4946,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + "\bNamePart\x12\x1b\n" + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + - "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" + "\n" + "FeatureSet\x12\x91\x01\n" + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + @@ -4739,7 +4957,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + "jsonFormat\x12\xab\x01\n" + - "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" + + "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" + + "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" + + "\x11VisibilityFeature\"\x81\x01\n" + + "\x17DefaultSymbolVisibility\x12%\n" + + "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EXPORT_ALL\x10\x01\x12\x14\n" + + "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" + + "\tLOCAL_ALL\x10\x03\x12\n" + + "\n" + + "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" + "\rFieldPresence\x12\x1a\n" + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + "\bEXPLICIT\x10\x01\x12\f\n" + @@ -4817,7 +5046,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + - "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" + + "\x10SymbolVisibility\x12\x14\n" + + "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" + + "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" + + "\x11VISIBILITY_EXPORT\x10\x02B~\n" + "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( @@ -4832,145 +5065,151 @@ func 
file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34) var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle - (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 28: google.protobuf.FileOptions - (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo - 
(*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility + (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat + (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle + (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 30: google.protobuf.FileOptions 
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart + (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 22, // 8: google.protobuf.DescriptorProto.field:type_name -> 
google.protobuf.FieldDescriptorProto - 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 37, // 41: google.protobuf.FieldOptions.features:type_name -> 
google.protobuf.FeatureSet - 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle - 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 74: 
google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 78, // [78:78] is the sub-list for method output_type - 78, // [78:78] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility + 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 5, // 33: 
google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 18, // 66: 
google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility + 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4983,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 18, - NumMessages: 33, + NumEnums: 20, + NumMessages: 34, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/modules.txt b/vendor/modules.txt index b499ae80b..281c5c4d5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -181,13 +181,13 @@ github.com/containerd/ttrpc # github.com/containerd/typeurl/v2 v2.2.3 ## explicit; go 1.21 github.com/containerd/typeurl/v2 -# github.com/containers/common v0.63.1 +# github.com/containers/common v0.64.1 ## explicit; go 1.23.3 github.com/containers/common/pkg/auth github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/completion github.com/containers/common/pkg/password -# github.com/containers/image/v5 v5.36.1 +# github.com/containers/image/v5 v5.36.2 ## explicit; go 1.23.3 github.com/containers/image/v5/copy github.com/containers/image/v5/directory/explicitfilepath @@ -293,7 +293,7 @@ github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v28.3.2+incompatible +# github.com/docker/cli v28.3.3+incompatible ## explicit github.com/docker/cli/cli/config 
github.com/docker/cli/cli/config/configfile @@ -366,8 +366,8 @@ github.com/go-git/go-git/v5/utils/ioutil # github.com/go-gorp/gorp/v3 v3.1.0 ## explicit; go 1.18 github.com/go-gorp/gorp/v3 -# github.com/go-jose/go-jose/v4 v4.1.0 -## explicit; go 1.24 +# github.com/go-jose/go-jose/v4 v4.1.1 +## explicit; go 1.23.0 github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher github.com/go-jose/go-jose/v4/json @@ -512,6 +512,9 @@ github.com/gorilla/websocket github.com/gosuri/uitable github.com/gosuri/uitable/util/strutil github.com/gosuri/uitable/util/wordwrap +# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc +## explicit; go 1.21 +github.com/grafana/regexp # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 ## explicit github.com/gregjones/httpcache @@ -611,7 +614,7 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/mattn/go-sqlite3 v1.14.28 +# github.com/mattn/go-sqlite3 v1.14.32 ## explicit; go 1.19 github.com/mattn/go-sqlite3 # github.com/miekg/pkcs11 v1.1.1 @@ -670,7 +673,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/gomega v1.37.0 +# github.com/onsi/gomega v1.38.2 ## explicit; go 1.23.0 github.com/onsi/gomega/gstruct/errors github.com/onsi/gomega/types @@ -684,7 +687,7 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runtime-spec v1.2.1 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/operator-framework/api v0.33.0 +# github.com/operator-framework/api v0.34.0 ## explicit; go 1.24.4 github.com/operator-framework/api/pkg/constraints github.com/operator-framework/api/pkg/encoding @@ -709,8 +712,8 @@ github.com/operator-framework/helm-operator-plugins/pkg/storage github.com/operator-framework/operator-lib/handler github.com/operator-framework/operator-lib/handler/internal/metrics github.com/operator-framework/operator-lib/internal/annotation -# github.com/operator-framework/operator-registry v1.56.0 -## explicit; go 1.24.3 +# github.com/operator-framework/operator-registry v1.57.0 +## explicit; go 1.24.4 github.com/operator-framework/operator-registry/alpha/declcfg github.com/operator-framework/operator-registry/alpha/model github.com/operator-framework/operator-registry/alpha/property @@ -743,7 +746,7 @@ github.com/pmezard/go-difflib/difflib # github.com/proglottis/gpgme v0.1.4 ## explicit; go 1.17 github.com/proglottis/gpgme -# github.com/prometheus/client_golang v1.23.0 +# github.com/prometheus/client_golang v1.23.1 ## explicit; go 1.23.0 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -757,7 +760,7 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal # github.com/prometheus/client_model v0.6.2 ## explicit; go 1.22.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.65.0 +# github.com/prometheus/common v0.66.0 ## explicit; go 1.23.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model @@ -808,10 +811,10 @@ github.com/smallstep/pkcs7/internal/legacy/x509 # github.com/spf13/cast v1.7.1 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.9.1 +# github.com/spf13/cobra v1.10.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.7 +# github.com/spf13/pflag v1.0.9 ## explicit; go 1.12 github.com/spf13/pflag # github.com/stefanberger/go-pkcs11uri 
v0.0.0-20230803200340-78284954bff6 @@ -823,7 +826,7 @@ github.com/stoewer/go-strcase # github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 github.com/stretchr/objx -# github.com/stretchr/testify v1.10.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml @@ -832,7 +835,7 @@ github.com/stretchr/testify/require # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 ## explicit github.com/titanous/rocacheck -# github.com/ulikunitz/xz v0.5.12 +# github.com/ulikunitz/xz v0.5.14 ## explicit; go 1.12 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash @@ -855,7 +858,7 @@ github.com/x448/float16 # github.com/xlab/treeprint v1.2.0 ## explicit; go 1.13 github.com/xlab/treeprint -# go.etcd.io/bbolt v1.4.2 +# go.etcd.io/bbolt v1.4.3 ## explicit; go 1.23 go.etcd.io/bbolt go.etcd.io/bbolt/errors @@ -878,7 +881,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.36.0 +# go.opentelemetry.io/otel v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -892,6 +895,7 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 +go.opentelemetry.io/otel/semconv/v1.34.0 # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -903,12 +907,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.36.0 +# go.opentelemetry.io/otel/metric v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.36.0 +# go.opentelemetry.io/otel/sdk v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -916,7 +920,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.36.0 +# go.opentelemetry.io/otel/trace v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -931,8 +935,8 @@ go.opentelemetry.io/proto/otlp/trace/v1 # go.yaml.in/yaml/v2 v2.4.2 ## explicit; go 1.15 go.yaml.in/yaml/v2 -# go.yaml.in/yaml/v3 v3.0.3 -## explicit; go 1.22 +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 go.yaml.in/yaml/v3 # golang.org/x/crypto v0.41.0 ## explicit; go 1.23.0 @@ -1070,17 +1074,17 @@ gomodules.xyz/jsonpatch/v2 # google.golang.org/genproto v0.0.0-20250603155806-513f23925822 ## explicit; go 1.23.0 google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 +# google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api 
google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.73.0 +# google.golang.org/grpc v1.75.0 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1144,8 +1148,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.6 -## explicit; go 1.22 +# google.golang.org/protobuf v1.36.8 +## explicit; go 1.23 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -1243,7 +1247,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.33.3 => k8s.io/api v0.33.2 +# k8s.io/api v0.33.4 => k8s.io/api v0.33.2 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1305,7 +1309,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.33.3 => k8s.io/apiextensions-apiserver v0.33.2 +# k8s.io/apiextensions-apiserver v0.33.4 => k8s.io/apiextensions-apiserver v0.33.2 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1327,7 +1331,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.33.3 => k8s.io/apimachinery v0.33.2 +# k8s.io/apimachinery v0.33.4 => k8s.io/apimachinery v0.33.2 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1398,7 +1402,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.33.3 => k8s.io/apiserver v0.33.2 +# k8s.io/apiserver v0.33.4 => k8s.io/apiserver v0.33.2 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install @@ -1453,7 +1457,7 @@ k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.33.3 => k8s.io/client-go v0.33.2 +# k8s.io/client-go v0.33.4 => k8s.io/client-go v0.33.2 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1817,7 +1821,7 @@ k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.33.3 => k8s.io/component-base v0.33.2 +# k8s.io/component-base v0.33.4 => k8s.io/component-base v0.33.2 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -1995,7 +1999,7 @@ sigs.k8s.io/controller-tools/pkg/genall sigs.k8s.io/controller-tools/pkg/loader 
sigs.k8s.io/controller-tools/pkg/markers sigs.k8s.io/controller-tools/pkg/version -# sigs.k8s.io/crdify v0.4.1-0.20250613143457-398e4483fb58 +# sigs.k8s.io/crdify v0.5.0 ## explicit; go 1.24.0 sigs.k8s.io/crdify/pkg/config sigs.k8s.io/crdify/pkg/runner diff --git a/vendor/sigs.k8s.io/crdify/pkg/validations/util.go b/vendor/sigs.k8s.io/crdify/pkg/validations/util.go index 4d10bd738..85a5bb699 100644 --- a/vendor/sigs.k8s.io/crdify/pkg/validations/util.go +++ b/vendor/sigs.k8s.io/crdify/pkg/validations/util.go @@ -77,8 +77,7 @@ func FlattenedCRDVersionDiff(a, b map[string]*apiextensionsv1.JSONSchemaProps) m // based on changes to the children properties. The changes to the children // properties should still be evaluated since we are looping through a flattened // map of all the properties for the CRD version - oldSchemaCopy := oldSchema.DeepCopy() - oldSchemaCopy.Properties = nil + oldSchemaCopy := DropChildrenPropertiesFromJSONSchema(oldSchema) newSchema, ok := b[prop] // In the event the property no longer exists on the new version @@ -91,8 +90,7 @@ func FlattenedCRDVersionDiff(a, b map[string]*apiextensionsv1.JSONSchemaProps) m // Do the same copy and unset logic for the new schema properties // before comparison to ensure we are only comparing the individual properties - newSchemaCopy := newSchema.DeepCopy() - newSchemaCopy.Properties = nil + newSchemaCopy := DropChildrenPropertiesFromJSONSchema(newSchema) if !equality.Semantic.DeepEqual(oldSchemaCopy, newSchemaCopy) { diffMap[prop] = Diff{Old: oldSchemaCopy, New: newSchemaCopy} @@ -102,6 +100,20 @@ func FlattenedCRDVersionDiff(a, b map[string]*apiextensionsv1.JSONSchemaProps) m return diffMap } +// DropChildrenPropertiesFromJSONSchema sets properties on a schema +// associated with children schemas to `nil`. Useful when calculating +// differences between a before and after of a given schema +// without the changes to its children schemas influencing the +// diff calculation. +// Returns a copy of the provided apiextensionsv1.JSONSchemaProps with children schemas dropped. +func DropChildrenPropertiesFromJSONSchema(schema *apiextensionsv1.JSONSchemaProps) *apiextensionsv1.JSONSchemaProps { + schemaCopy := schema.DeepCopy() + schemaCopy.Properties = nil + schemaCopy.Items = nil + + return schemaCopy +} + // SchemaWalkerFunc is a function that walks a JSONSchemaProps. // ancestry is an ordered list of ancestors of s, where index 0 is the root and index len-1 is the direct parent. type SchemaWalkerFunc func(s *apiextensionsv1.JSONSchemaProps, fldPath, simpleLocation *field.Path, ancestry []*apiextensionsv1.JSONSchemaProps) bool diff --git a/vendor/sigs.k8s.io/crdify/pkg/validators/version/served/served.go b/vendor/sigs.k8s.io/crdify/pkg/validators/version/served/served.go index 36c99c43e..bd6b4ff0d 100644 --- a/vendor/sigs.k8s.io/crdify/pkg/validators/version/served/served.go +++ b/vendor/sigs.k8s.io/crdify/pkg/validators/version/served/served.go @@ -81,7 +81,7 @@ func New(opts ...ValidatorOption) *Validator { } // Validate runs the validations configured in the Validator.
-func (v *Validator) Validate(_, b *apiextensionsv1.CustomResourceDefinition) map[string]map[string][]validations.ComparisonResult { +func (v *Validator) Validate(a, b *apiextensionsv1.CustomResourceDefinition) map[string]map[string][]validations.ComparisonResult { result := map[string]map[string][]validations.ComparisonResult{} // If conversion webhook is specified and conversion policy is ignore, pass check @@ -89,24 +89,117 @@ func (v *Validator) Validate(_, b *apiextensionsv1.CustomResourceDefinition) map return result } - servedVersions := []apiextensionsv1.CustomResourceDefinitionVersion{} + aResults := v.compareVersionPairs(a) + bResults := v.compareVersionPairs(b) + subtractExistingIssues(bResults, aResults) - for _, version := range b.Spec.Versions { + return bResults +} + +func (v *Validator) compareVersionPairs(crd *apiextensionsv1.CustomResourceDefinition) map[string]map[string][]validations.ComparisonResult { + result := map[string]map[string][]validations.ComparisonResult{} + + for resultVersionPair, versions := range makeVersionPairs(crd) { + result[resultVersionPair] = validations.CompareVersions(versions[0], versions[1], v.unhandledEnforcement, v.comparators...) + } + + return result +} + +func makeVersionPairs(crd *apiextensionsv1.CustomResourceDefinition) map[string][2]apiextensionsv1.CustomResourceDefinitionVersion { + servedVersions := make([]apiextensionsv1.CustomResourceDefinitionVersion, 0, len(crd.Spec.Versions)) + + for _, version := range crd.Spec.Versions { if version.Served { servedVersions = append(servedVersions, version) } } + if len(servedVersions) < 2 { + return nil + } + slices.SortFunc(servedVersions, func(a, b apiextensionsv1.CustomResourceDefinitionVersion) int { return versionhelper.CompareKubeAwareVersionStrings(a.Name, b.Name) }) - for i, oldVersion := range servedVersions[:len(servedVersions)-1] { - for _, newVersion := range servedVersions[i+1:] { - resultVersion := fmt.Sprintf("%s <-> %s", oldVersion.Name, newVersion.Name) - result[resultVersion] = validations.CompareVersions(oldVersion, newVersion, v.unhandledEnforcement, v.comparators...) + pairs := make(map[string][2]apiextensionsv1.CustomResourceDefinitionVersion, numUnidirectionalPermutations(servedVersions)) + + for i, iVersion := range servedVersions[:len(servedVersions)-1] { + for _, jVersion := range servedVersions[i+1:] { + resultVersionPair := fmt.Sprintf("%s -> %s", iVersion.Name, jVersion.Name) + pairs[resultVersionPair] = [2]apiextensionsv1.CustomResourceDefinitionVersion{iVersion, jVersion} } } - return result + return pairs +} + +func numUnidirectionalPermutations[T any](in []T) int { + n := len(in) + + return (n * (n - 1)) / 2 +} + +// subtractExistingIssues removes errors and warnings from b's results that are also found in a's results. 
+func subtractExistingIssues(b, a map[string]map[string][]validations.ComparisonResult) { + sliceToMapByName := func(in []validations.ComparisonResult) map[string]*validations.ComparisonResult { + out := make(map[string]*validations.ComparisonResult, len(in)) + + for i := range in { + v := &in[i] + out[v.Name] = v + } + + return out + } + + for versionPair, bVersionPairResults := range b { + aVersionPairResults, ok := a[versionPair] + if !ok { + // If the version pair is not found in a, that means + // b introduced a new version, so we'll keep _all_ + // of the comparison results for this pair + continue + } + + for fieldPath, bFieldPathResults := range bVersionPairResults { + aFieldPathResults, ok := aVersionPairResults[fieldPath] + if !ok { + // If this field path is not found in a's results + // for this version pair, that means b introduced a new field + // in an existing schema, so we'll keep _all_ of the comparison + // results for this field path. + continue + } + + aResultMap := sliceToMapByName(aFieldPathResults) + bResultMap := sliceToMapByName(bFieldPathResults) + + for validationName, bValidationResult := range bResultMap { + aValidationResult, ok := aResultMap[validationName] + if !ok { + // If a's results do not include results for this validation, + // that means we ran a new validation for b that we did not + // run for a. We never intend to do that, so if that is somehow + // the case, let's panic and say what our programmer intent was. + panic(fmt.Sprintf("Validation %q not found in a's results for version pair %q. This should never happen because this validator uses the same validation configuration for CRDs a and b.", validationName, versionPair)) + } + + bValidationResult.Errors = slices.DeleteFunc(bValidationResult.Errors, func(bErr string) bool { + return slices.Contains(aValidationResult.Errors, bErr) + }) + if len(bValidationResult.Errors) == 0 { + bValidationResult.Errors = nil + } + + bValidationResult.Warnings = slices.DeleteFunc(bValidationResult.Warnings, func(bWarn string) bool { + return slices.Contains(aValidationResult.Warnings, bWarn) + }) + if len(bValidationResult.Warnings) == 0 { + bValidationResult.Warnings = nil + } + } + } + } }
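The served.go rework above changes `Validate` from reporting every incompatibility between served version pairs of the new CRD (`b`) to reporting only the incompatibilities that `b` introduces: the same version-pair comparisons are run against the old CRD (`a`), and any error or warning already present in `a`'s results is subtracted from `b`'s. Below is a minimal, self-contained sketch of that subtraction idea; the `result` type, map keys, and issue strings are simplified stand-ins chosen for illustration, not crdify's actual `validations.ComparisonResult` structures.

package main

import (
	"fmt"
	"slices"
)

// result is a simplified stand-in for crdify's per-validation comparison
// result; only the fields needed for the illustration are kept.
type result struct {
	Errors   []string
	Warnings []string
}

// subtract removes from b any error or warning that a already reported under
// the same key, mirroring the intent of subtractExistingIssues: pre-existing
// issues are dropped, while keys missing from a (e.g. a newly introduced
// served version) keep all of their issues.
func subtract(b, a map[string]*result) {
	for key, bRes := range b {
		aRes, ok := a[key]
		if !ok {
			continue // key is new in b; keep everything
		}
		bRes.Errors = slices.DeleteFunc(bRes.Errors, func(e string) bool {
			return slices.Contains(aRes.Errors, e)
		})
		bRes.Warnings = slices.DeleteFunc(bRes.Warnings, func(w string) bool {
			return slices.Contains(aRes.Warnings, w)
		})
	}
}

func main() {
	// Keys follow the "<old> -> <new>" version-pair format produced by
	// makeVersionPairs above; the issue strings are invented for the example.
	before := map[string]*result{
		"v1alpha1 -> v1beta1": {Errors: []string{"enum: allowed values removed"}},
	}
	after := map[string]*result{
		"v1alpha1 -> v1beta1": {
			Errors: []string{"enum: allowed values removed", "type: string changed to integer"},
		},
		"v1beta1 -> v1": {Errors: []string{"required: new required field added"}},
	}
	subtract(after, before)
	fmt.Println(after["v1alpha1 -> v1beta1"].Errors) // only the newly introduced error remains
	fmt.Println(after["v1beta1 -> v1"].Errors)       // new version pair: all issues kept
}

The vendored implementation performs this subtraction per version pair, per field path, and per validation name, and panics if `b` has results for a validation that never ran against `a`, since both CRDs are always validated with the same configuration.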